From c7095be9136825efc5bd85181b0395b833f96aee Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:49:19 +0100 Subject: [PATCH 01/72] Refactor Keyring._start_key_lookups There's an awful lot of deferreds and dictionaries flying around here. The whole thing can be made much simpler and achieve the same effect. --- synapse/crypto/keyring.py | 84 +++++++++++++++--------------------- tests/crypto/test_keyring.py | 29 ------------- 2 files changed, 34 insertions(+), 79 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 341c863152..efa72dc5fc 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -238,27 +238,9 @@ class Keyring(object): """ try: - # create a deferred for each server we're going to look up the keys - # for; we'll resolve them once we have completed our lookups. - # These will be passed into wait_for_previous_lookups to block - # any other lookups until we have finished. - # The deferreds are called with no logcontext. - server_to_deferred = { - rq.server_name: defer.Deferred() for rq in verify_requests - } + ctx = LoggingContext.current_context() - # We want to wait for any previous lookups to complete before - # proceeding. - yield self.wait_for_previous_lookups(server_to_deferred) - - # Actually start fetching keys. - self._get_server_verify_keys(verify_requests) - - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. - # - # map from server name to a set of request ids + # map from server name to a set of outstanding request ids server_to_request_ids = {} for verify_request in verify_requests: @@ -266,40 +248,55 @@ class Keyring(object): request_id = id(verify_request) server_to_request_ids.setdefault(server_name, set()).add(request_id) - def remove_deferreds(res, verify_request): + # Wait for any previous lookups to complete before proceeding. + yield self.wait_for_previous_lookups(server_to_request_ids.keys()) + + # take out a lock on each of the servers by sticking a Deferred in + # key_downloads + for server_name in server_to_request_ids.keys(): + self.key_downloads[server_name] = defer.Deferred() + logger.debug("Got key lookup lock on %s", server_name) + + # When we've finished fetching all the keys for a given server_name, + # drop the lock by resolving the deferred in key_downloads. + def lookup_done(res, verify_request): server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids[server_name].discard(request_id) - if not server_to_request_ids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) + server_requests = server_to_request_ids[server_name] + server_requests.remove(id(verify_request)) + + # if there are no more requests for this server, we can drop the lock. + if not server_requests: + with PreserveLoggingContext(ctx): + logger.debug("Releasing key lookup lock on %s", server_name) + + d = self.key_downloads.pop(server_name) + d.callback(None) return res for verify_request in verify_requests: - verify_request.key_ready.addBoth(remove_deferreds, verify_request) + verify_request.key_ready.addBoth(lookup_done, verify_request) + + # Actually start fetching keys. 
+ self._get_server_verify_keys(verify_requests) except Exception: logger.exception("Error starting key lookups") @defer.inlineCallbacks - def wait_for_previous_lookups(self, server_to_deferred): + def wait_for_previous_lookups(self, server_names): """Waits for any previous key lookups for the given servers to finish. Args: - server_to_deferred (dict[str, Deferred]): server_name to deferred which gets - resolved once we've finished looking up keys for that server. - The Deferreds should be regular twisted ones which call their - callbacks with no logcontext. + server_names (Iterable[str]): list of servers which we want to look up - Returns: a Deferred which resolves once all key lookups for the given - servers have completed. Follows the synapse rules of logcontext - preservation. + Returns: + Deferred[None]: resolves once all key lookups for the given servers have + completed. Follows the synapse rules of logcontext preservation. """ loop_count = 1 while True: wait_on = [ (server_name, self.key_downloads[server_name]) - for server_name in server_to_deferred.keys() + for server_name in server_names if server_name in self.key_downloads ] if not wait_on: @@ -314,19 +311,6 @@ class Keyring(object): loop_count += 1 - ctx = LoggingContext.current_context() - - def rm(r, server_name_): - with PreserveLoggingContext(ctx): - logger.debug("Releasing key lookup lock on %s", server_name_) - self.key_downloads.pop(server_name_, None) - return r - - for server_name, deferred in server_to_deferred.items(): - logger.debug("Got key lookup lock on %s", server_name) - self.key_downloads[server_name] = deferred - deferred.addBoth(rm, server_name) - def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 795703967d..8d94a503d6 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -86,35 +86,6 @@ class KeyringTestCase(unittest.HomeserverTestCase): getattr(LoggingContext.current_context(), "request", None), expected ) - def test_wait_for_previous_lookups(self): - kr = keyring.Keyring(self.hs) - - lookup_1_deferred = defer.Deferred() - lookup_2_deferred = defer.Deferred() - - # we run the lookup in a logcontext so that the patched inlineCallbacks can check - # it is doing the right thing with logcontexts. - wait_1_deferred = run_in_context( - kr.wait_for_previous_lookups, {"server1": lookup_1_deferred} - ) - - # there were no previous lookups, so the deferred should be ready - self.successResultOf(wait_1_deferred) - - # set off another wait. It should block because the first lookup - # hasn't yet completed. - wait_2_deferred = run_in_context( - kr.wait_for_previous_lookups, {"server1": lookup_2_deferred} - ) - - self.assertFalse(wait_2_deferred.called) - - # let the first lookup complete (in the sentinel context) - lookup_1_deferred.callback(None) - - # now the second wait should complete. 
- self.successResultOf(wait_2_deferred) - def test_verify_json_objects_for_server_awaits_previous_requests(self): key1 = signedjson.key.generate_signing_key(1) From dcca56babad3a42ac9967995f7e6f9db51e37353 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:57:00 +0100 Subject: [PATCH 02/72] Add a delay to key lookup lock release to fix stack overflow A tactical call_later here should fix #5723 --- synapse/crypto/keyring.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index efa72dc5fc..e8bb420ad1 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -259,6 +259,10 @@ class Keyring(object): # When we've finished fetching all the keys for a given server_name, # drop the lock by resolving the deferred in key_downloads. + def drop_server_lock(server_name): + d = self.key_downloads.pop(server_name) + d.callback(None) + def lookup_done(res, verify_request): server_name = verify_request.server_name server_requests = server_to_request_ids[server_name] @@ -269,8 +273,10 @@ class Keyring(object): with PreserveLoggingContext(ctx): logger.debug("Releasing key lookup lock on %s", server_name) - d = self.key_downloads.pop(server_name) - d.callback(None) + # ... but not immediately, as that can cause stack explosions if + # we get a long queue of lookups. + self.clock.call_later(0, drop_server_lock, server_name) + return res for verify_request in verify_requests: From f214bff0c0af157429525098fb6ebb9ca0579fcd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:58:17 +0100 Subject: [PATCH 03/72] changelog --- changelog.d/5724.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5724.bugfix diff --git a/changelog.d/5724.bugfix b/changelog.d/5724.bugfix new file mode 100644 index 0000000000..1b3683daf6 --- /dev/null +++ b/changelog.d/5724.bugfix @@ -0,0 +1 @@ +Fix stack overflow in server key lookup code. \ No newline at end of file From 826e6ec3bdfca92a5815f032c3246fab9a2aef88 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 22 Jul 2019 11:15:21 +0100 Subject: [PATCH 04/72] Opentracing Documentation (#5703) * Opentracing survival guide * Update decorator names in doc * Doc cleanup These are all alterations as a result of comments in #5703; they are mostly typos and clarifications. The most interesting changes are: - Split developer and user docs into two sections - Add a high level description of OpenTracing * newsfile * Move contributor specific info to docstring. * Sample config. * Trailing whitespace. * Update 5703.misc * Apply suggestions from code review Mostly just rewording parts of the docs for clarity. Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5703.misc | 1 + docs/opentracing.rst | 100 ++++++++++++++++++++++++++ docs/sample_config.yaml | 14 +--- synapse/config/tracer.py | 14 +--- synapse/logging/opentracing.py | 125 +++++++++++++++++++++++++++++++++ 5 files changed, 230 insertions(+), 24 deletions(-) create mode 100644 changelog.d/5703.misc create mode 100644 docs/opentracing.rst diff --git a/changelog.d/5703.misc b/changelog.d/5703.misc new file mode 100644 index 0000000000..6e9b2d734e --- /dev/null +++ b/changelog.d/5703.misc @@ -0,0 +1 @@ +Documentation for opentracing.
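Editorial note on PATCH 02 above: the stack overflow being fixed comes from resolving a long chain of Deferreds synchronously. Each waiter's callback fires the next waiter's Deferred in the same call stack, so a long queue of key lookups recurses once per waiter. The following minimal, self-contained sketch shows the failure mode and the call_later(0, ...) fix; the helper names are invented for illustration and this is not Synapse code.

.. code-block:: python

    from twisted.internet import defer, reactor

    def build_chain(n, release):
        """Chain n Deferreds so that firing one triggers release() of the
        next, mimicking a long queue of key-lookup waiters."""
        first = defer.Deferred()
        prev = first
        for _ in range(n):
            nxt = defer.Deferred()
            prev.addCallback(lambda _res, d=nxt: release(d))
            prev = nxt
        return first

    def release_sync(d):
        # Fires the next Deferred directly: the whole chain unwinds in a
        # single call stack, blowing through Python's recursion limit.
        d.callback(None)

    def release_later(d):
        # The PATCH 02 approach: hand the callback to the reactor, so each
        # link in the chain runs in a fresh stack frame.
        reactor.callLater(0, d.callback, None)

    # build_chain(5000, release_sync).callback(None)   # hits the recursion limit
    # build_chain(5000, release_later).callback(None)  # fine once the reactor runs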
diff --git a/docs/opentracing.rst b/docs/opentracing.rst new file mode 100644 index 0000000000..b91a2208a8 --- /dev/null +++ b/docs/opentracing.rst @@ -0,0 +1,100 @@ +=========== +OpenTracing +=========== + +Background +---------- + +OpenTracing is a semi-standard being adopted by a number of distributed tracing +platforms. It is a common API for facilitating vendor-agnostic tracing +instrumentation. That is, we can use the OpenTracing API and select one of a +number of tracer implementations to do the heavy lifting in the background. +Our current selected implementation is Jaeger. + +OpenTracing is a tool which gives an insight into the causal relationship of +work done in and between servers. The servers each track events and report them +to a centralised server - in Synapse's case: Jaeger. The basic unit used to +represent events is the span. The span roughly represents a single piece of work +that was done and the time at which it occurred. A span can have child spans, +meaning that the work of the child had to be completed for the parent span to +complete, or it can have follow-on spans which represent work that is undertaken +as a result of the parent but is not depended on by the parent in order to +finish. + +Since this is undertaken in a distributed environment, a request to another +server, such as an RPC or a simple GET, can be considered a span (a unit of +work) for the local server. This causal link is what OpenTracing aims to +capture and visualise. In order to do this, metadata about the local server's +span, i.e. the 'span context', needs to be included with the request to the +remote. + +It is up to the remote server to decide what it does with the spans +it creates. This is called the sampling policy and it can be configured +through Jaeger's settings. + +For OpenTracing concepts see +https://opentracing.io/docs/overview/what-is-tracing/. + +For more information about Jaeger's implementation see +https://www.jaegertracing.io/docs/ + +====================== +Setting up OpenTracing +====================== + +To receive OpenTracing spans, start up a Jaeger server. This can be done +using Docker like so: + +.. code-block:: bash + + docker run -d --name jaeger \ + -p 6831:6831/udp \ + -p 6832:6832/udp \ + -p 5778:5778 \ + -p 16686:16686 \ + -p 14268:14268 \ + jaegertracing/all-in-one:1.13 + +The latest documentation is probably at +https://www.jaegertracing.io/docs/1.13/getting-started/ + + +Enable OpenTracing in Synapse +----------------------------- + +OpenTracing is not enabled by default. It must be enabled in the homeserver +config by uncommenting the config options under ``opentracing`` as shown in +the `sample config <./sample_config.yaml>`_. For example: + +.. code-block:: yaml + + opentracing: + tracer_enabled: true + homeserver_whitelist: + - "mytrustedhomeserver.org" + - "*.myotherhomeservers.com" + +Homeserver whitelisting +----------------------- + +The homeserver whitelist is configured using regular expressions. A list of regular +expressions can be given and their union will be compared when propagating any +span contexts to another homeserver. + +Though it's mostly safe to send and receive span contexts to and from +untrusted users since span contexts are usually opaque IDs, it can lead to +two problems, namely: + +- If the span context is marked as sampled by the sending homeserver, the receiver will + sample it. Therefore two homeservers with wildly different sampling policies + could incur higher sampling counts than intended.
+- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse + but that doesn't prevent another server sending you baggage which will be logged + to OpenTracing's logs. + +================== +Configuring Jaeger +================== + +Sampling strategies can be set as in this document: +https://www.jaegertracing.io/docs/1.13/sampling/ diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 5b804d16a4..0a96197ca6 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1422,18 +1422,8 @@ opentracing: #enabled: true # The list of homeservers we wish to send and receive span contexts and span baggage. - # - # Though it's mostly safe to send and receive span contexts to and from - # untrusted users since span contexts are usually opaque ids it can lead to - # two problems, namely: - # - If the span context is marked as sampled by the sending homeserver the receiver will - # sample it. Therefore two homeservers with wildly disparaging sampling policies - # could incur higher sampling counts than intended. - # - Span baggage can be arbitrary data. For safety this has been disabled in synapse - # but that doesn't prevent another server sending you baggage which will be logged - # to opentracing logs. - # - # This a list of regexes which are matched against the server_name of the + # See docs/opentracing.rst + # This is a list of regexes which are matched against the server_name of the # homeserver. # # By defult, it is empty, so no servers are matched. diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index a2ce9ab3f6..4479454415 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -48,18 +48,8 @@ class TracerConfig(Config): #enabled: true # The list of homeservers we wish to send and receive span contexts and span baggage. - # - # Though it's mostly safe to send and receive span contexts to and from - # untrusted users since span contexts are usually opaque ids it can lead to - # two problems, namely: - # - If the span context is marked as sampled by the sending homeserver the receiver will - # sample it. Therefore two homeservers with wildly disparaging sampling policies - # could incur higher sampling counts than intended. - # - Span baggage can be arbitrary data. For safety this has been disabled in synapse - # but that doesn't prevent another server sending you baggage which will be logged - # to opentracing logs. - # - # This a list of regexes which are matched against the server_name of the + # See docs/opentracing.rst + # This is a list of regexes which are matched against the server_name of the # homeserver. # # By defult, it is empty, so no servers are matched. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 415040f5ee..3da33d7826 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -24,6 +24,131 @@ # this move the methods have work very similarly to opentracing's and it should only # be a matter of few regexes to move over to opentracing's access patterns proper. +""" +============================ +Using OpenTracing in Synapse +============================ + +Python-specific tracing concepts are at https://opentracing.io/guides/python/. +Note that Synapse wraps OpenTracing in a small module (this one) in order to make the +OpenTracing dependency optional. That means that the access patterns are +different to those demonstrated in the OpenTracing guides. 
However, it is +still useful to know, especially if OpenTracing is included as a full dependency +in the future or if you are modifying this module. + + +OpenTracing is encapsulated so that +no span objects from OpenTracing are exposed in Synapse's code. This allows +OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as +an optional dependency. This does, however, limit the number of modifiable spans +at any point in the code to one. From here out, references to `opentracing` +in the code snippets refer to Synapse's module. + +Tracing +------- + +In Synapse it is not possible to start a non-active span. Spans can be started +using the ``start_active_span`` method. This returns a scope (see +OpenTracing docs) which is a context manager that needs to be entered and +exited. This is usually done by using ``with``. + +.. code-block:: python + + from synapse.logging.opentracing import start_active_span + + with start_active_span("operation name"): + # Do something we want to trace + +Forgetting to enter or exit a scope will result in some mysterious and grievous log +context errors. + +At any time when there is an active span, ``opentracing.set_tag`` can be used to +set a tag on the current active span. + +Tracing functions +----------------- + +Functions can be easily traced using decorators. There is a decorator for +'normal' functions and for functions which are actually deferreds. The name of +the function becomes the operation name for the span. + +.. code-block:: python + + from synapse.logging.opentracing import trace, trace_deferred + + # Start a span using 'normal_function' as the operation name + @trace + def normal_function(*args, **kwargs): + # Does all kinds of cool and expected things + return something_usual_and_useful + + # Start a span using 'deferred_function' as the operation name + @trace_deferred + @defer.inlineCallbacks + def deferred_function(*args, **kwargs): + # We start + yield we_wait + # we finish + defer.returnValue(something_usual_and_useful) + +Operation names can be explicitly set for functions by using +``trace_using_operation_name`` and +``trace_deferred_using_operation_name``: + +.. code-block:: python + + from synapse.logging.opentracing import ( + trace_using_operation_name, + trace_deferred_using_operation_name + ) + + @trace_using_operation_name("A *much* better operation name") + def normal_function(*args, **kwargs): + # Does all kinds of cool and expected things + return something_usual_and_useful + + @trace_deferred_using_operation_name("Another exciting operation name!") + @defer.inlineCallbacks + def deferred_function(*args, **kwargs): + # We start + yield we_wait + # we finish + defer.returnValue(something_usual_and_useful) + +Contexts and carriers +--------------------- + +A selection of wrappers is provided for injecting and extracting contexts from +carriers. Unfortunately OpenTracing's three context injection +techniques are not adequate for injecting OpenTracing span contexts into +Twisted's HTTP headers, EDU contents and our database tables. Also note that +the binary encoding format mandated by OpenTracing is not actually implemented +by jaeger_client v4.0.0 - it will silently noop. +Please refer to the end of ``logging/opentracing.py`` for the available +injection and extraction methods.
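Editorial note for readers new to the injection/extraction terminology above: a span context is serialised into a "carrier" (a dict of headers, an EDU body, a database row) on the way out and deserialised on the way in. The sketch below uses the upstream ``opentracing`` package's primitives purely for illustration; Synapse's own wrappers (listed at the end of ``logging/opentracing.py``) exist precisely so that Synapse code never calls these directly.

.. code-block:: python

    import opentracing
    from opentracing.propagation import Format

    carrier = {}  # e.g. a dict of outgoing HTTP headers or an EDU content dict

    # Sending side: serialise the active span's context into the carrier.
    span = opentracing.tracer.start_span("outgoing-request")
    opentracing.tracer.inject(span.context, Format.TEXT_MAP, carrier)

    # Receiving side: recover the remote context and parent a new span on it.
    remote_ctx = opentracing.tracer.extract(Format.TEXT_MAP, carrier)
    child = opentracing.tracer.start_span("handle-request", child_of=remote_ctx)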
+ +Homeserver whitelisting +----------------------- + +Most of the whitelist checks are encapsulated in the module's injection +and extraction methods, but be aware that using custom carriers or crossing +uncharted waters will require you to enforce the whitelist yourself. +``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes +in a destination and compares it to the whitelist. + +======= +Gotchas +======= + +- Checking whitelists on span propagation +- Inserting PII +- Forgetting to enter or exit a scope +- Span source: make sure that the span you expect to be active across a + function call really will be that one. Does the current function have more + than one caller? Will all of those calling functions be in a context + with an active span? +""" + import contextlib import logging import re From f337d2f0f089e5b53e13c85fef0b89e93defa5e5 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 22 Jul 2019 11:31:05 +0100 Subject: [PATCH 05/72] Demo uses deprecated cli option (#5725) * Remove deprecated 'verbose' cli arg * Create 5725.bugfix --- changelog.d/5725.bugfix | 1 + demo/start.sh | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5725.bugfix diff --git a/changelog.d/5725.bugfix b/changelog.d/5725.bugfix new file mode 100644 index 0000000000..73ef419727 --- /dev/null +++ b/changelog.d/5725.bugfix @@ -0,0 +1 @@ +start.sh no longer uses deprecated cli option. diff --git a/demo/start.sh b/demo/start.sh index 1c4f12d0bb..eccaa2abeb 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -29,7 +29,7 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config - + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces.
Please don't @@ -43,7 +43,7 @@ for port in 8080 8081 8082; do tls: true resources: - names: [client, federation] - + - port: $port tls: false bind_addresses: ['::1', '127.0.0.1'] @@ -68,7 +68,7 @@ for port in 8080 8081 8082; do # Generate tls keys openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix" - + # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config echo 'trusted_key_servers:' >> $DIR/etc/$port.config @@ -120,7 +120,6 @@ for port in 8080 8081 8082; do python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ - -vv \ popd done From 54437c48ca0dcc745f11a362ce7dc7267b568896 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 22 Jul 2019 12:59:04 +0100 Subject: [PATCH 06/72] 1.2.0rc1 --- CHANGES.md | 81 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5397.doc | 1 - changelog.d/5544.feature | 2 - changelog.d/5589.feature | 1 - changelog.d/5597.feature | 1 - changelog.d/5606.misc | 1 - changelog.d/5609.bugfix | 1 - changelog.d/5611.misc | 1 - changelog.d/5613.feature | 1 - changelog.d/5616.misc | 1 - changelog.d/5617.misc | 1 - changelog.d/5619.docker | 1 - changelog.d/5620.docker | 1 - changelog.d/5621.bugfix | 1 - changelog.d/5622.misc | 1 - changelog.d/5623.feature | 1 - changelog.d/5625.removal | 1 - changelog.d/5626.feature | 1 - changelog.d/5627.misc | 1 - changelog.d/5628.misc | 1 - changelog.d/5629.bugfix | 1 - changelog.d/5630.misc | 1 - changelog.d/5636.misc | 1 - changelog.d/5637.misc | 1 - changelog.d/5638.bugfix | 1 - changelog.d/5639.misc | 1 - changelog.d/5640.misc | 1 - changelog.d/5641.misc | 1 - changelog.d/5642.misc | 1 - changelog.d/5643.misc | 1 - changelog.d/5644.bugfix | 1 - changelog.d/5645.misc | 1 - changelog.d/5651.doc | 1 - changelog.d/5654.bugfix | 1 - changelog.d/5655.misc | 1 - changelog.d/5656.misc | 1 - changelog.d/5657.misc | 1 - changelog.d/5658.bugfix | 1 - changelog.d/5659.misc | 1 - changelog.d/5660.feature | 1 - changelog.d/5661.doc | 1 - changelog.d/5664.misc | 1 - changelog.d/5673.misc | 1 - changelog.d/5674.feature | 1 - changelog.d/5675.doc | 1 - changelog.d/5689.misc | 1 - changelog.d/5699.bugfix | 1 - changelog.d/5700.bugfix | 2 - changelog.d/5701.bugfix | 1 - changelog.d/5703.misc | 1 - changelog.d/5707.bugfix | 1 - changelog.d/5712.feature | 2 - changelog.d/5714.feature | 1 - synapse/__init__.py | 2 +- 54 files changed, 82 insertions(+), 56 deletions(-) delete mode 100644 changelog.d/5397.doc delete mode 100644 changelog.d/5544.feature delete mode 100644 changelog.d/5589.feature delete mode 100644 changelog.d/5597.feature delete mode 100644 changelog.d/5606.misc delete mode 100644 changelog.d/5609.bugfix delete mode 100644 changelog.d/5611.misc delete mode 100644 changelog.d/5613.feature delete mode 100644 changelog.d/5616.misc delete mode 100644 changelog.d/5617.misc delete mode 100644 changelog.d/5619.docker delete mode 100644 changelog.d/5620.docker delete mode 100644 changelog.d/5621.bugfix delete mode 100644 changelog.d/5622.misc delete mode 100644 changelog.d/5623.feature delete mode 100644 changelog.d/5625.removal delete mode 100644 changelog.d/5626.feature delete mode 100644 changelog.d/5627.misc delete mode 100644 changelog.d/5628.misc delete mode 100644 changelog.d/5629.bugfix delete mode 100644 changelog.d/5630.misc delete mode 100644 changelog.d/5636.misc delete mode 100644 changelog.d/5637.misc delete mode 100644 
changelog.d/5638.bugfix delete mode 100644 changelog.d/5639.misc delete mode 100644 changelog.d/5640.misc delete mode 100644 changelog.d/5641.misc delete mode 100644 changelog.d/5642.misc delete mode 100644 changelog.d/5643.misc delete mode 100644 changelog.d/5644.bugfix delete mode 100644 changelog.d/5645.misc delete mode 100644 changelog.d/5651.doc delete mode 100644 changelog.d/5654.bugfix delete mode 100644 changelog.d/5655.misc delete mode 100644 changelog.d/5656.misc delete mode 100644 changelog.d/5657.misc delete mode 100644 changelog.d/5658.bugfix delete mode 100644 changelog.d/5659.misc delete mode 100644 changelog.d/5660.feature delete mode 100644 changelog.d/5661.doc delete mode 100644 changelog.d/5664.misc delete mode 100644 changelog.d/5673.misc delete mode 100644 changelog.d/5674.feature delete mode 100644 changelog.d/5675.doc delete mode 100644 changelog.d/5689.misc delete mode 100644 changelog.d/5699.bugfix delete mode 100644 changelog.d/5700.bugfix delete mode 100644 changelog.d/5701.bugfix delete mode 100644 changelog.d/5703.misc delete mode 100644 changelog.d/5707.bugfix delete mode 100644 changelog.d/5712.feature delete mode 100644 changelog.d/5714.feature diff --git a/CHANGES.md b/CHANGES.md index dc8c74fe58..e42fe1ba4d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,84 @@ +Synapse 1.2.0rc1 (2019-07-22) +============================= + +Features +-------- + +- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712)) +- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589)) +- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597)) +- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613)) +- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623)) +- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626)) +- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660)) +- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674)) +- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714)) + + +Bugfixes +-------- + +- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609)) +- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621)) +- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629)) +- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638)) +- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644)) +- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. 
([\#5654](https://github.com/matrix-org/synapse/issues/5654)) +- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658)) +- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707)) +- Ignore redactions of m.room.create events. ([\#5701](https://github.com/matrix-org/synapse/issues/5701)) + + +Updates to the Docker image +--------------------------- + +- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619)) +- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620)) + + +Improved Documentation +---------------------- + +- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397)) +- --no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651)) +- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661)) +- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675)) + + +Deprecations and Removals +------------------------- + +- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625)) + + +Internal Changes +---------------- + +- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617)) +- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611)) +- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616)) +- Remove dead code for persiting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622)) +- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627)) +- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628)) +- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630)) +- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636)) +- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637)) +- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639)) +- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640)) +- Remove access-token support from RegistrationHandler.register, and rename it. 
([\#5641](https://github.com/matrix-org/synapse/issues/5641)) +- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642)) +- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643)) +- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645)) +- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655)) +- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656)) +- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657)) +- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659)) +- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664)) +- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673)) +- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689)) +- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703)) + + Synapse 1.1.0 (2019-07-04) ========================== diff --git a/changelog.d/5397.doc b/changelog.d/5397.doc deleted file mode 100644 index c2b500b482..0000000000 --- a/changelog.d/5397.doc +++ /dev/null @@ -1 +0,0 @@ -Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! diff --git a/changelog.d/5544.feature b/changelog.d/5544.feature deleted file mode 100644 index 7d3459129d..0000000000 --- a/changelog.d/5544.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add support for opentracing. - diff --git a/changelog.d/5589.feature b/changelog.d/5589.feature deleted file mode 100644 index a87e669dd4..0000000000 --- a/changelog.d/5589.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to pull all locally stored events out of synapse that a particular user can see. diff --git a/changelog.d/5597.feature b/changelog.d/5597.feature deleted file mode 100644 index 6f92748885..0000000000 --- a/changelog.d/5597.feature +++ /dev/null @@ -1 +0,0 @@ -Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. diff --git a/changelog.d/5606.misc b/changelog.d/5606.misc deleted file mode 100644 index bb3c028167..0000000000 --- a/changelog.d/5606.misc +++ /dev/null @@ -1 +0,0 @@ -Move logging code out of `synapse.util` and into `synapse.logging`. diff --git a/changelog.d/5609.bugfix b/changelog.d/5609.bugfix deleted file mode 100644 index 534ee22a1b..0000000000 --- a/changelog.d/5609.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'utime went backwards' errors on daemonization. diff --git a/changelog.d/5611.misc b/changelog.d/5611.misc deleted file mode 100644 index a2d1695139..0000000000 --- a/changelog.d/5611.misc +++ /dev/null @@ -1 +0,0 @@ -Add a blacklist file to the repo to blacklist certain sytests from failing CI. diff --git a/changelog.d/5613.feature b/changelog.d/5613.feature deleted file mode 100644 index 4b7bb2745c..0000000000 --- a/changelog.d/5613.feature +++ /dev/null @@ -1 +0,0 @@ -Add `sender` and `origin_server_ts` fields to `m.replace`. 
diff --git a/changelog.d/5616.misc b/changelog.d/5616.misc deleted file mode 100644 index 9f94be6778..0000000000 --- a/changelog.d/5616.misc +++ /dev/null @@ -1 +0,0 @@ -Make runtime errors surrounding password reset emails much clearer. diff --git a/changelog.d/5617.misc b/changelog.d/5617.misc deleted file mode 100644 index bb3c028167..0000000000 --- a/changelog.d/5617.misc +++ /dev/null @@ -1 +0,0 @@ -Move logging code out of `synapse.util` and into `synapse.logging`. diff --git a/changelog.d/5619.docker b/changelog.d/5619.docker deleted file mode 100644 index b69e5cc57c..0000000000 --- a/changelog.d/5619.docker +++ /dev/null @@ -1 +0,0 @@ -Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). diff --git a/changelog.d/5620.docker b/changelog.d/5620.docker deleted file mode 100644 index cbb5a75d6a..0000000000 --- a/changelog.d/5620.docker +++ /dev/null @@ -1 +0,0 @@ -Add missing space in default logging file format generated by the Docker image. diff --git a/changelog.d/5621.bugfix b/changelog.d/5621.bugfix deleted file mode 100644 index f1a2851f45..0000000000 --- a/changelog.d/5621.bugfix +++ /dev/null @@ -1 +0,0 @@ -Various minor fixes to the federation request rate limiter. diff --git a/changelog.d/5622.misc b/changelog.d/5622.misc deleted file mode 100644 index 9f0a87311c..0000000000 --- a/changelog.d/5622.misc +++ /dev/null @@ -1 +0,0 @@ -Remove dead code for persiting outgoing federation transactions. diff --git a/changelog.d/5623.feature b/changelog.d/5623.feature deleted file mode 100644 index b73080e88d..0000000000 --- a/changelog.d/5623.feature +++ /dev/null @@ -1 +0,0 @@ -Add default push rule to ignore reactions. diff --git a/changelog.d/5625.removal b/changelog.d/5625.removal deleted file mode 100644 index d33a778d69..0000000000 --- a/changelog.d/5625.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for the `invite_3pid_guest` configuration setting. diff --git a/changelog.d/5626.feature b/changelog.d/5626.feature deleted file mode 100644 index 5ef793b943..0000000000 --- a/changelog.d/5626.feature +++ /dev/null @@ -1 +0,0 @@ -Include the original event when asking for its relations. diff --git a/changelog.d/5627.misc b/changelog.d/5627.misc deleted file mode 100644 index 730721b5ef..0000000000 --- a/changelog.d/5627.misc +++ /dev/null @@ -1 +0,0 @@ -Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. diff --git a/changelog.d/5628.misc b/changelog.d/5628.misc deleted file mode 100644 index fec8446793..0000000000 --- a/changelog.d/5628.misc +++ /dev/null @@ -1 +0,0 @@ -Move RegistrationHandler.get_or_create_user to test code. diff --git a/changelog.d/5629.bugfix b/changelog.d/5629.bugfix deleted file mode 100644 index 672eabad40..0000000000 --- a/changelog.d/5629.bugfix +++ /dev/null @@ -1 +0,0 @@ -Forbid viewing relations on an event once it has been redacted. diff --git a/changelog.d/5630.misc b/changelog.d/5630.misc deleted file mode 100644 index f112d873eb..0000000000 --- a/changelog.d/5630.misc +++ /dev/null @@ -1 +0,0 @@ -Add some more common python virtual-environment paths to the black exclusion list. diff --git a/changelog.d/5636.misc b/changelog.d/5636.misc deleted file mode 100644 index 3add990283..0000000000 --- a/changelog.d/5636.misc +++ /dev/null @@ -1 +0,0 @@ -Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. 
\ No newline at end of file diff --git a/changelog.d/5637.misc b/changelog.d/5637.misc deleted file mode 100644 index f18d6197e5..0000000000 --- a/changelog.d/5637.misc +++ /dev/null @@ -1 +0,0 @@ -Unblacklist some user_directory sytests. diff --git a/changelog.d/5638.bugfix b/changelog.d/5638.bugfix deleted file mode 100644 index 66781ad9e6..0000000000 --- a/changelog.d/5638.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. diff --git a/changelog.d/5639.misc b/changelog.d/5639.misc deleted file mode 100644 index 413b13128c..0000000000 --- a/changelog.d/5639.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out some redundant code in the login implementation. diff --git a/changelog.d/5640.misc b/changelog.d/5640.misc deleted file mode 100644 index 7d69a1b3b6..0000000000 --- a/changelog.d/5640.misc +++ /dev/null @@ -1 +0,0 @@ -Update ModuleApi to avoid register(generate_token=True). diff --git a/changelog.d/5641.misc b/changelog.d/5641.misc deleted file mode 100644 index 1899bc963d..0000000000 --- a/changelog.d/5641.misc +++ /dev/null @@ -1 +0,0 @@ -Remove access-token support from RegistrationHandler.register, and rename it. diff --git a/changelog.d/5642.misc b/changelog.d/5642.misc deleted file mode 100644 index e7f8e214a4..0000000000 --- a/changelog.d/5642.misc +++ /dev/null @@ -1 +0,0 @@ -Remove access-token support from `RegistrationStore.register`, and rename it. diff --git a/changelog.d/5643.misc b/changelog.d/5643.misc deleted file mode 100644 index 2b2316469e..0000000000 --- a/changelog.d/5643.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging for auto-join when a new user is created. diff --git a/changelog.d/5644.bugfix b/changelog.d/5644.bugfix deleted file mode 100644 index f6302fd08d..0000000000 --- a/changelog.d/5644.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix newly-registered users not being able to lookup their own profile without joining a room. diff --git a/changelog.d/5645.misc b/changelog.d/5645.misc deleted file mode 100644 index 4fa9699e4f..0000000000 --- a/changelog.d/5645.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. \ No newline at end of file diff --git a/changelog.d/5651.doc b/changelog.d/5651.doc deleted file mode 100644 index e2d5a8dc8a..0000000000 --- a/changelog.d/5651.doc +++ /dev/null @@ -1 +0,0 @@ ---no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. diff --git a/changelog.d/5654.bugfix b/changelog.d/5654.bugfix deleted file mode 100644 index 5f76b041cd..0000000000 --- a/changelog.d/5654.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. \ No newline at end of file diff --git a/changelog.d/5655.misc b/changelog.d/5655.misc deleted file mode 100644 index acab6aee92..0000000000 --- a/changelog.d/5655.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a small typo in a code comment. \ No newline at end of file diff --git a/changelog.d/5656.misc b/changelog.d/5656.misc deleted file mode 100644 index a8de20a7d0..0000000000 --- a/changelog.d/5656.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up exception handling around client access tokens. 
diff --git a/changelog.d/5657.misc b/changelog.d/5657.misc deleted file mode 100644 index bdec9ae4c0..0000000000 --- a/changelog.d/5657.misc +++ /dev/null @@ -1 +0,0 @@ -Add a mechanism for per-test homeserver configuration in the unit tests. diff --git a/changelog.d/5658.bugfix b/changelog.d/5658.bugfix deleted file mode 100644 index f6ae906a9a..0000000000 --- a/changelog.d/5658.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. diff --git a/changelog.d/5659.misc b/changelog.d/5659.misc deleted file mode 100644 index 686001295c..0000000000 --- a/changelog.d/5659.misc +++ /dev/null @@ -1 +0,0 @@ -Inline issue_access_token. diff --git a/changelog.d/5660.feature b/changelog.d/5660.feature deleted file mode 100644 index 82889fdaf1..0000000000 --- a/changelog.d/5660.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `session_lifetime` configuration option, after which access tokens will expire. diff --git a/changelog.d/5661.doc b/changelog.d/5661.doc deleted file mode 100644 index c70e62014e..0000000000 --- a/changelog.d/5661.doc +++ /dev/null @@ -1 +0,0 @@ -Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! diff --git a/changelog.d/5664.misc b/changelog.d/5664.misc deleted file mode 100644 index 0ca7a0fbd0..0000000000 --- a/changelog.d/5664.misc +++ /dev/null @@ -1 +0,0 @@ -Update the sytest BuildKite configuration to checkout Synapse in `/src`. diff --git a/changelog.d/5673.misc b/changelog.d/5673.misc deleted file mode 100644 index 1942256358..0000000000 --- a/changelog.d/5673.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `docker` type to the towncrier configuration. diff --git a/changelog.d/5674.feature b/changelog.d/5674.feature deleted file mode 100644 index 04bdfa4ad5..0000000000 --- a/changelog.d/5674.feature +++ /dev/null @@ -1 +0,0 @@ -Return "This account has been deactivated" when a deactivated user tries to login. diff --git a/changelog.d/5675.doc b/changelog.d/5675.doc deleted file mode 100644 index 4cd4d0be1a..0000000000 --- a/changelog.d/5675.doc +++ /dev/null @@ -1 +0,0 @@ -Minor tweaks to postgres documentation. diff --git a/changelog.d/5689.misc b/changelog.d/5689.misc deleted file mode 100644 index 8aa3e3f6a2..0000000000 --- a/changelog.d/5689.misc +++ /dev/null @@ -1 +0,0 @@ -Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. \ No newline at end of file diff --git a/changelog.d/5699.bugfix b/changelog.d/5699.bugfix deleted file mode 100644 index 30d5e67f67..0000000000 --- a/changelog.d/5699.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. \ No newline at end of file diff --git a/changelog.d/5700.bugfix b/changelog.d/5700.bugfix deleted file mode 100644 index 51bce8d441..0000000000 --- a/changelog.d/5700.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. - diff --git a/changelog.d/5701.bugfix b/changelog.d/5701.bugfix deleted file mode 100644 index fd2866e16a..0000000000 --- a/changelog.d/5701.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ignore redactions of m.room.create events. diff --git a/changelog.d/5703.misc b/changelog.d/5703.misc deleted file mode 100644 index 6e9b2d734e..0000000000 --- a/changelog.d/5703.misc +++ /dev/null @@ -1 +0,0 @@ -Documentation for opentracing. 
diff --git a/changelog.d/5707.bugfix b/changelog.d/5707.bugfix deleted file mode 100644 index aa3046c5e1..0000000000 --- a/changelog.d/5707.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. diff --git a/changelog.d/5712.feature b/changelog.d/5712.feature deleted file mode 100644 index 7d3459129d..0000000000 --- a/changelog.d/5712.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add support for opentracing. - diff --git a/changelog.d/5714.feature b/changelog.d/5714.feature deleted file mode 100644 index 2fd32e5e38..0000000000 --- a/changelog.d/5714.feature +++ /dev/null @@ -1 +0,0 @@ -Enable aggregations support by default diff --git a/synapse/__init__.py b/synapse/__init__.py index cf22fabd61..f26e49da36 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ try: except ImportError: pass -__version__ = "1.1.0" +__version__ = "1.2.0rc1" From 5ea773c50568088b30e15728b65480d0335fe14e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 13:15:08 +0100 Subject: [PATCH 07/72] Cache get_version_string. The version of a module isn't going to change over the lifetime of the process (assuming no funky hot reloading is going on, which it isn't), so let's just cache the result to avoid spawning lots of git subprocesses. Fixes #5672. --- synapse/util/versionstring.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index a4d9a462f7..fa404b9d75 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -22,6 +22,23 @@ logger = logging.getLogger(__name__) def get_version_string(module): + """Given a module, calculate a git-aware version string for it. + + If called on a module not in a git checkout, this will return `__version__`. + + Args: + module (module) + + Returns: + str + """ + + cached_version = getattr(module, "_synapse_version_string_cache", None) + if cached_version: + return cached_version + + version_string = module.__version__ + try: null = open(os.devnull, "w") cwd = os.path.dirname(os.path.abspath(module.__file__)) @@ -80,8 +97,10 @@ def get_version_string(module): s for s in (git_branch, git_tag, git_commit, git_dirty) if s ) - return "%s (%s)" % (module.__version__, git_version) + version_string = "%s (%s)" % (module.__version__, git_version) except Exception as e: logger.info("Failed to check for git repository: %s", e) - return module.__version__ + module._synapse_version_string_cache = version_string + + return version_string From 8b0d5b171e1af3eb9a8a380af1be0f9c71d22d17 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 22 Jul 2019 13:15:35 +0100 Subject: [PATCH 08/72] Make changelog slightly more readable --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e42fe1ba4d..bb6bcb75ed 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -65,7 +65,7 @@ Internal Changes - Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637)) - Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639)) - Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640)) -- Remove access-token support from RegistrationHandler.register, and rename it.
([\#5641](https://github.com/matrix-org/synapse/issues/5641)) +- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641)) - Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642)) - Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643)) - Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645)) From 2017369f7d134fb40f52564123819d6c77f4f9b0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 13:18:25 +0100 Subject: [PATCH 09/72] Newsfile --- changelog.d/5730.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5730.misc diff --git a/changelog.d/5730.misc b/changelog.d/5730.misc new file mode 100644 index 0000000000..88767fe2f7 --- /dev/null +++ b/changelog.d/5730.misc @@ -0,0 +1 @@ +Cache result of get_version_string to reduce overhead off /versions client and federation requests. From 66f5ff72fd2179c2cbb6a7755d36273d51a2e32f Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Mon, 22 Jul 2019 15:21:19 +0300 Subject: [PATCH 10/72] Add `user_type` to returned fields in admin API user list endpoints Mostly user type will be empty (normal user) but there is also the "support" user type. Signed-off-by: Jason Robinson --- changelog.d/5731.misc | 1 + synapse/storage/__init__.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5731.misc diff --git a/changelog.d/5731.misc b/changelog.d/5731.misc new file mode 100644 index 0000000000..dffae5d874 --- /dev/null +++ b/changelog.d/5731.misc @@ -0,0 +1 @@ +Return 'user_type' in admin API user endpoints results. diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 6b0ca80087..86a333a919 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -469,7 +469,7 @@ class DataStore( return self._simple_select_list( table="users", keyvalues={}, - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], desc="get_users", ) @@ -494,7 +494,7 @@ class DataStore( orderby=order, start=start, limit=limit, - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], ) count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn) retval = {"users": users, "total": count} @@ -514,7 +514,7 @@ class DataStore( table="users", term=term, col="name", - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], desc="search_users", ) From 22e862304a9c3faac86d4373a50a3b7efd6758b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 14:09:56 +0100 Subject: [PATCH 11/72] Update changelog.d/5730.misc Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5730.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5730.misc b/changelog.d/5730.misc index 88767fe2f7..a99677f5e7 100644 --- a/changelog.d/5730.misc +++ b/changelog.d/5730.misc @@ -1 +1 @@ -Cache result of get_version_string to reduce overhead off /versions client and federation requests. +Cache result of get_version_string to reduce overhead of `/version` federation requests. 
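Editorial note on PATCH 07 above: the caching trick is worth spelling out. Recomputing the version string spawns git subprocesses, so the result is stashed as an attribute on the module object itself, which lives as long as the process. A stripped-down sketch of the pattern follows; the ``_compute`` helper is a stand-in for the git-probing code and is not part of the patch.

.. code-block:: python

    import types

    def _compute(module):
        # Stand-in for the git branch/tag/commit probing in the real function.
        return getattr(module, "__version__", "unknown")

    def get_version_string(module: types.ModuleType) -> str:
        cached = getattr(module, "_version_string_cache", None)
        if cached:
            return cached
        version_string = _compute(module)
        # Cache on the module object: computed once per process lifetime.
        module._version_string_cache = version_string
        return version_string

    # e.g. get_version_string(types) returns the same object on every call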
From c560b791e1eb50cded53886b926abdc102cf2e51 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 14:19:38 +0100 Subject: [PATCH 12/72] Add process hooks to tell systemd our state. Fixes #5676. --- synapse/app/_base.py | 29 +++++++++++++++++++++++++++++ synapse/python_dependencies.py | 1 + 2 files changed, 30 insertions(+) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 540dbd9236..c010e70955 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -15,10 +15,12 @@ import gc import logging +import os import signal import sys import traceback +import sdnotify from daemonize import Daemonize from twisted.internet import defer, error, reactor @@ -242,9 +244,16 @@ def start(hs, listeners=None): if hasattr(signal, "SIGHUP"): def handle_sighup(*args, **kwargs): + # Tell systemd our state, if we're using it. This will silently fail if + # we're not using systemd. + sd_channel = sdnotify.SystemdNotifier() + sd_channel.notify("RELOADING=1") + for i in _sighup_callbacks: i(hs) + sd_channel.notify("READY=1") + signal.signal(signal.SIGHUP, handle_sighup) register_sighup(refresh_certificate) @@ -260,6 +269,7 @@ def start(hs, listeners=None): hs.get_datastore().start_profiling() setup_sentry(hs) + setup_sdnotify(hs) except Exception: traceback.print_exc(file=sys.stderr) reactor = hs.get_reactor() @@ -292,6 +302,25 @@ def setup_sentry(hs): scope.set_tag("worker_name", name) +def setup_sdnotify(hs): + """Adds process state hooks to tell systemd what we are up to. + """ + + # Tell systemd our state, if we're using it. This will silently fail if + # we're not using systemd. + sd_channel = sdnotify.SystemdNotifier() + + hs.get_reactor().addSystemEventTrigger( + "after", + "startup", + lambda: sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid())), + ) + + hs.get_reactor().addSystemEventTrigger( + "before", "shutdown", lambda: sd_channel.notify("STOPPING=1") + ) + + def install_dns_limiter(reactor, max_dns_requests_in_flight=100): """Replaces the resolver with one that limits the number of in flight DNS requests. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c6465c0386..195a7a70c8 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -72,6 +72,7 @@ REQUIREMENTS = [ "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", + "sdnotify>=0.3", ] CONDITIONAL_REQUIREMENTS = { From 79f689e6c2ead8b24bf75fcb99acd0fb0faca324 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 14:51:53 +0100 Subject: [PATCH 13/72] Newsfile --- changelog.d/5732.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5732.feature diff --git a/changelog.d/5732.feature b/changelog.d/5732.feature new file mode 100644 index 0000000000..9021864350 --- /dev/null +++ b/changelog.d/5732.feature @@ -0,0 +1 @@ +Add sd_notify hooks to ease systemd integration and allow usage of Type=Notify. From 80cfad233efc6b03c75ab5496db7079466eeb894 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 15:22:14 +0100 Subject: [PATCH 14/72] Call startup commands as system triggers. This helps ensure that we only consider ourselves "up" once all the startup functions have completed.
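Editorial note on why the 'before'/'startup' phase matters here: Twisted fires 'before' startup triggers ahead of 'after' startup triggers, and for a not-yet-running reactor callWhenRunning(f) is equivalent to addSystemEventTrigger('after', 'startup', f), so the startup work would race with the READY=1 notification registered in PATCH 12. A small runnable sketch of the ordering (illustration only, relying just on documented Twisted behaviour):

.. code-block:: python

    from twisted.internet import reactor

    reactor.addSystemEventTrigger(
        "before", "startup", lambda: print("1: start() and friends run here")
    )
    reactor.addSystemEventTrigger(
        "after", "startup", lambda: print("2: sdnotify READY=1 fires here")
    )
    reactor.callWhenRunning(
        lambda: print("3: callWhenRunning is also an 'after'-startup trigger")
    )

    reactor.callLater(0, reactor.stop)
    reactor.run()
    # Prints 1, then 2 and 3 (in registration order); moving start() to the
    # 'before'/'startup' phase guarantees it completes before systemd is told
    # the service is ready.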
--- synapse/app/appservice.py | 4 +++- synapse/app/client_reader.py | 4 +++- synapse/app/event_creator.py | 4 +++- synapse/app/federation_reader.py | 4 +++- synapse/app/federation_sender.py | 4 +++- synapse/app/frontend_proxy.py | 4 +++- synapse/app/homeserver.py | 2 +- synapse/app/media_repository.py | 4 +++- synapse/app/pusher.py | 2 +- synapse/app/synchrotron.py | 4 +++- synapse/app/user_dir.py | 4 +++- 11 files changed, 29 insertions(+), 11 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index e01f3e5f3b..54bb114dec 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -168,7 +168,9 @@ def start(config_options): ) ps.setup() - reactor.callWhenRunning(_base.start, ps, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ps, config.worker_listeners + ) _base.start_worker_reactor("synapse-appservice", config) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 29bddc4823..721bb5b119 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -194,7 +194,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-client-reader", config) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 042cfd04af..473c8895d0 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -193,7 +193,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-event-creator", config) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 76a97f8f32..5255d9e8cc 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -175,7 +175,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-federation-reader", config) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index fec49d5092..c5a2880e69 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -198,7 +198,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-federation-sender", config) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 1f1f1df78e..5b563c2778 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -247,7 +247,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-frontend-proxy", config) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0c075cb3f1..34c3f5ee99 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -447,7 +447,7 @@ def setup(config_options): reactor.stop() sys.exit(1) - 
reactor.callWhenRunning(start) + reactor.addSystemEventTrigger("before", "startup", start) return hs diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index d70780e9d5..ea26f29acb 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -161,7 +161,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-media-repository", config) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 070de7d0b0..692ffa2f04 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -216,7 +216,7 @@ def start(config_options): _base.start(ps, config.worker_listeners) ps.get_pusherpool().start() - reactor.callWhenRunning(start) + reactor.addSystemEventTrigger("before", "startup", start) _base.start_worker_reactor("synapse-pusher", config) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 315c030694..a1c3b162f7 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -451,7 +451,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-synchrotron", config) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 03ef21bd01..cb29a1afab 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -224,7 +224,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-user-dir", config) From 17c27df6ea81f18da186ada5e3b79200c84f6d55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 15:24:25 +0100 Subject: [PATCH 15/72] Update example systemd service file --- contrib/systemd/matrix-synapse.service | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service index 595b69916c..38d369ea3d 100644 --- a/contrib/systemd/matrix-synapse.service +++ b/contrib/systemd/matrix-synapse.service @@ -14,7 +14,9 @@ Description=Synapse Matrix homeserver [Service] -Type=simple +Type=notify +NotifyAccess=main +ExecReload=/bin/kill -HUP $MAINPID Restart=on-abort User=synapse From 0d0f6d12bc84b106ac83ecf824bd722a08070b78 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 01:05:00 +1000 Subject: [PATCH 16/72] Fix logging in workers (#5729) This also adds a worker blacklist. 
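A note on the callWhenRunning-to-addSystemEventTrigger conversions in PATCH 14 above: reactor.callWhenRunning(f) behaves roughly like registering f for the "after"/"startup" phase, whereas a "before"/"startup" trigger may return a Deferred which the reactor waits on before firing the later phases. Since setup_sdnotify registers its READY=1 hook for "after"/"startup", the readiness notification now waits for the start functions to finish. A self-contained sketch of that ordering, with illustrative timings and prints:

    from twisted.internet import defer, reactor, task


    @defer.inlineCallbacks
    def slow_startup():
        # Stand-in for the real startup work; a "before"/"startup"
        # trigger may return a Deferred, which the reactor waits for.
        print("startup: beginning")
        yield task.deferLater(reactor, 1.0, lambda: None)
        print("startup: complete")


    def report_ready():
        # Fires only once every "before"-phase Deferred has resolved,
        # which is what makes it a safe place for sd_notify's READY=1.
        print("ready")
        reactor.stop()


    reactor.addSystemEventTrigger("before", "startup", slow_startup)
    reactor.addSystemEventTrigger("after", "startup", report_ready)
    reactor.run()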
---
 .buildkite/pipeline.yml     |  3 ++-
 .buildkite/worker-blacklist | 28 ++++++++++++++++++++++++++++
 changelog.d/5729.removal    |  1 +
 synapse/config/workers.py   |  1 +
 4 files changed, 32 insertions(+), 1 deletion(-)
 create mode 100644 .buildkite/worker-blacklist
 create mode 100644 changelog.d/5729.removal

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index d5e5aeec6b..c8ae1a44be 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -220,8 +220,10 @@ steps:
       env:
         POSTGRES: "1"
         WORKERS: "1"
+        BLACKLIST: "synapse-blacklist-with-workers"
       command:
         - "bash .buildkite/merge_base_branch.sh"
+        - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
         - "bash /synapse_sytest.sh"
       plugins:
         - docker#v3.0.1:
@@ -229,7 +231,6 @@ steps:
             propagate-environment: true
             always-pull: true
             workdir: "/src"
-      soft_fail: true
       retry:
         automatic:
           - exit_status: -1
diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
new file mode 100644
index 0000000000..a211ed7b18
--- /dev/null
+++ b/.buildkite/worker-blacklist
@@ -0,0 +1,28 @@
+# This file serves as a blacklist for SyTest tests that we expect will fail in
+# Synapse when run under worker mode. For more details, see sytest-blacklist.
+
+Message history can be paginated
+
+m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users
+
+m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users
+
+Can re-join room if re-invited
+
+/upgrade creates a new room
+
+The only membership state included in an initial sync is for all the senders in the timeline
+
+Local device key changes get to remote servers
+
+If remote user leaves room we no longer receive device updates
+
+Forgotten room messages cannot be paginated
+
+Inbound federation can get public room list
+
+Members from the gap are included in gappy incr LL sync
+
+Leaves are present in non-gapped incremental syncs
+
+Old leaves are present in gapped incremental syncs
\ No newline at end of file
diff --git a/changelog.d/5729.removal b/changelog.d/5729.removal
new file mode 100644
index 0000000000..3af5198e6b
--- /dev/null
+++ b/changelog.d/5729.removal
@@ -0,0 +1 @@
+Synapse no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 246d72cd61..bc0fc165e3 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -31,6 +31,7 @@ class WorkerConfig(Config):
         self.worker_listeners = config.get("worker_listeners", [])
         self.worker_daemonize = config.get("worker_daemonize")
         self.worker_pid_file = config.get("worker_pid_file")
+        self.worker_log_config = config.get("worker_log_config")
 
         # The host used to connect to the main synapse
         self.worker_replication_host = config.get("worker_replication_host", None)
From c96322c8d2f934e4ccd73e6eac3e2e7e7a4af916 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 16:07:12 +0100
Subject: [PATCH 17/72] Don't package sytest-blacklist file.

I don't think it's useful, and I don't even know where it would end up.
---
 MANIFEST.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 834ddfad39..919cd8a1cd 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,7 +7,6 @@ include demo/README
 include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
-include sytest-blacklist
 
 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.sql.postgres
@@ -34,6 +33,7 @@ exclude Dockerfile
 exclude .dockerignore
 exclude test_postgresql.sh
 exclude .editorconfig
+exclude sytest-blacklist
 
 include pyproject.toml
 recursive-include changelog.d *
From d9ea9881d252790ac7f1e3525217e37ef9bbceb9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 16:09:15 +0100
Subject: [PATCH 18/72] Newsfile

---
 changelog.d/5733.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5733.misc

diff --git a/changelog.d/5733.misc b/changelog.d/5733.misc
new file mode 100644
index 0000000000..a2a8c26383
--- /dev/null
+++ b/changelog.d/5733.misc
@@ -0,0 +1 @@
+Don't package the sytest test blacklist file.
From b2a629ef498df2b0585c9474613dda778bec7be0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 09:49:26 +0100
Subject: [PATCH 19/72] Speed up current state background update.

Turns out that storing huge JSON arrays in the progress JSON isn't
something that postgres particularly likes.
---
 synapse/storage/roommember.py | 50 ++++++++++++++++++++++------------
 1 file changed, 31 insertions(+), 19 deletions(-)

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 257bcdb2f8..b3c002b9eb 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -852,22 +852,25 @@ class RoomMemberStore(RoomMemberWorkerStore):
     @defer.inlineCallbacks
     def _background_current_state_membership(self, progress, batch_size):
         """Update the new membership column on current_state_events.
+
+        This works by iterating over all rooms in alphabetical order.
         """
-        if "rooms" not in progress:
-            rooms = yield self._simple_select_onecol(
-                table="current_state_events",
-                keyvalues={},
-                retcol="DISTINCT room_id",
-                desc="_background_current_state_membership_get_rooms",
-            )
-            progress["rooms"] = rooms
-
-        rooms = progress["rooms"]
-
-        def _background_current_state_membership_txn(txn):
+        def _background_current_state_membership_txn(txn, last_processed_room):
             processed = 0
-            while rooms and processed < batch_size:
+            while processed < batch_size:
+                txn.execute(
+                    """
+                        SELECT MIN(room_id) FROM rooms WHERE room_id > ?
+                    """,
+                    (last_processed_room,),
+                )
+                row = txn.fetchone()
+                if not row or not row[0]:
+                    return processed, True
+
+                next_room, = row
+
                 sql = """
                     UPDATE current_state_events AS c
                     SET membership = (
@@ -876,24 +879,33 @@ class RoomMemberStore(RoomMemberWorkerStore):
                     )
                     WHERE room_id = ?
                 """
-                txn.execute(sql, (rooms.pop(),))
+                txn.execute(sql, (next_room,))
                 processed += txn.rowcount
 
+                last_processed_room = next_room
+
                 self._background_update_progress_txn(
-                    txn, _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, progress
+                    txn,
+                    _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
+                    {"last_processed_room": last_processed_room},
                 )
 
-            return processed
+            return processed, False
 
-        result = yield self.runInteraction(
+        # If we haven't got a last processed room then just use the empty
+        # string, which will compare before all room IDs correctly.
+ last_processed_room = progress.get("last_processed_room", "") + + row_count, finished = yield self.runInteraction( "_background_current_state_membership_update", _background_current_state_membership_txn, + last_processed_room, ) - if not rooms: + if finished: yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) - defer.returnValue(result) + defer.returnValue(row_count) class _JoinedHostsCache(object): From cf0006719d9086000ddcd2d3129364197a6fa875 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 09:53:23 +0100 Subject: [PATCH 20/72] Newsfile --- changelog.d/5738.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5738.misc diff --git a/changelog.d/5738.misc b/changelog.d/5738.misc new file mode 100644 index 0000000000..5e15dfd5fa --- /dev/null +++ b/changelog.d/5738.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From cda4460d99d0956359767ef7a2b8a9740d5aec7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 10:13:12 +0100 Subject: [PATCH 21/72] Also update systemd-with-workers contrib examples --- .../systemd-with-workers/system/matrix-synapse-worker@.service | 3 ++- contrib/systemd-with-workers/system/matrix-synapse.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service index 9d980d5168..3507e2e989 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service +++ b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service @@ -4,7 +4,8 @@ After=matrix-synapse.service BindsTo=matrix-synapse.service [Service] -Type=simple +Type=notify +NotifyAccess=main User=matrix-synapse WorkingDirectory=/var/lib/matrix-synapse EnvironmentFile=/etc/default/matrix-synapse diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service index 3aae19034c..68e8991f18 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse.service +++ b/contrib/systemd-with-workers/system/matrix-synapse.service @@ -2,7 +2,8 @@ Description=Synapse Matrix Homeserver [Service] -Type=simple +Type=notify +NotifyAccess=main User=matrix-synapse WorkingDirectory=/var/lib/matrix-synapse EnvironmentFile=/etc/default/matrix-synapse From 1883223a01ee17b8813a2aca9493532bb07915d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 10:26:52 +0100 Subject: [PATCH 22/72] Mark flakey tests as blacklisted for worker mode --- .buildkite/worker-blacklist | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index a211ed7b18..8ed8eef1a3 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -25,4 +25,10 @@ Members from the gap are included in gappy incr LL sync Leaves are present in non-gapped incremental syncs -Old leaves are present in gapped incremental syncs \ No newline at end of file +Old leaves are present in gapped incremental syncs + +User sees updates to presence from other users in the incremental sync. 
+
+Gapped incremental syncs include all state changes
+
+Old members are included in gappy incr LL sync if they start speaking
From 22d2338aceed83c6b32081e0118c7653bb9474e6 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 10:27:53 +0100
Subject: [PATCH 23/72] Newsfile

---
 changelog.d/5740.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5740.misc

diff --git a/changelog.d/5740.misc b/changelog.d/5740.misc
new file mode 100644
index 0000000000..97a476bef5
--- /dev/null
+++ b/changelog.d/5740.misc
@@ -0,0 +1 @@
+Blacklist some flakey tests in worker mode.
From 3db1377b261eaf3fcff486547d6302ccb24553e5 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 13:31:03 +0100
Subject: [PATCH 24/72] Log when we receive a receipt from a different origin

---
 synapse/handlers/receipts.py | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)

diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index a85dd8cdee..e58bf7e360 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -17,7 +17,7 @@ import logging
 from twisted.internet import defer
 
 from synapse.handlers._base import BaseHandler
-from synapse.types import ReadReceipt
+from synapse.types import ReadReceipt, get_domain_from_id
 
 logger = logging.getLogger(__name__)
 
@@ -40,18 +40,27 @@ class ReceiptsHandler(BaseHandler):
     def _received_remote_receipt(self, origin, content):
         """Called when we receive an EDU of type m.receipt from a remote HS.
         """
-        receipts = [
-            ReadReceipt(
-                room_id=room_id,
-                receipt_type=receipt_type,
-                user_id=user_id,
-                event_ids=user_values["event_ids"],
-                data=user_values.get("data", {}),
-            )
-            for room_id, room_values in content.items()
-            for receipt_type, users in room_values.items()
-            for user_id, user_values in users.items()
-        ]
+        receipts = []
+        for room_id, room_values in content.items():
+            for receipt_type, users in room_values.items():
+                for user_id, user_values in users.items():
+                    if get_domain_from_id(user_id) != origin:
+                        logger.info(
+                            "Received receipt for user %r from server %s, ignoring",
+                            user_id,
+                            origin,
+                        )
+                        continue
+
+                    receipts.append(
+                        ReadReceipt(
+                            room_id=room_id,
+                            receipt_type=receipt_type,
+                            user_id=user_id,
+                            event_ids=user_values["event_ids"],
+                            data=user_values.get("data", {}),
+                        )
+                    )
 
         yield self._handle_new_receipts(receipts)
From 18a466b84e52b6e8c51a878e612d86410d6af680 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens
Date: Tue, 23 Jul 2019 13:31:16 +0100
Subject: [PATCH 25/72] Opentracing Utils (#5722)

* Add decorators for tracing functions

* Use the new clean contexts

* Context and edu utils

* Move opentracing setters

* Move whitelisting

* Sectioning comments

* Better args wrapper

* Docstrings

Co-Authored-By: Erik Johnston

* Remove unused methods.

* Don't use global

* One tracing decorator to rule them all.
---
 changelog.d/5722.misc                  |   1 +
 synapse/logging/opentracing.py         | 455 +++++++++++++++++++------
 synapse/logging/scopecontextmanager.py |   2 +-
 3 files changed, 357 insertions(+), 101 deletions(-)
 create mode 100644 changelog.d/5722.misc

diff --git a/changelog.d/5722.misc b/changelog.d/5722.misc
new file mode 100644
index 0000000000..f2d236188d
--- /dev/null
+++ b/changelog.d/5722.misc
@@ -0,0 +1 @@
+Add a set of opentracing utils.
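Stepping back to PATCH 19 above: the rewritten background update is an instance of keyset pagination. Instead of materialising every room ID into the progress JSON, the progress blob keeps only the last processed key, and each batch asks the database for the smallest key above it. A reduced, self-contained model of the pattern; sqlite and the three toy rooms are illustrative, while the real update runs inside Synapse's transaction machinery:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE rooms (room_id TEXT PRIMARY KEY)")
    conn.executemany(
        "INSERT INTO rooms VALUES (?)", [("!a:hs",), ("!b:hs",), ("!c:hs",)]
    )


    def run_batch(last_processed_room, batch_size=2):
        """Process up to batch_size rooms; return (count, new key, finished)."""
        processed = 0
        while processed < batch_size:
            row = conn.execute(
                "SELECT MIN(room_id) FROM rooms WHERE room_id > ?",
                (last_processed_room,),
            ).fetchone()
            if not row or not row[0]:
                return processed, last_processed_room, True
            last_processed_room = row[0]
            # ... the per-room UPDATE from the patch would go here ...
            processed += 1
        return processed, last_processed_room, False


    # The empty string compares before every room ID, so it is the
    # natural starting key, exactly as in the patch.
    finished, cursor = False, ""
    while not finished:
        _, cursor, finished = run_batch(cursor)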
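Similarly, the origin check in PATCH 24 above is easiest to read against the wire shape of an m.receipt EDU, which is what its three nested loops walk. A toy rendering with invented room, event and user identifiers; get_domain_from_id is the real helper the patch imports, everything else here is illustrative:

    from synapse.types import get_domain_from_id

    origin = "remote.example"

    # room_id -> receipt type -> user_id -> receipt values
    edu_content = {
        "!room:remote.example": {
            "m.read": {
                # Accepted: the user belongs to the sending server.
                "@alice:remote.example": {"event_ids": ["$ev1"], "data": {}},
                # Now ignored (and logged): a server may not send receipts
                # on behalf of another server's users.
                "@mallory:other.example": {"event_ids": ["$ev2"]},
            }
        }
    }

    for room_id, room_values in edu_content.items():
        for receipt_type, users in room_values.items():
            for user_id, user_values in users.items():
                if get_domain_from_id(user_id) != origin:
                    continue
                print("accept", room_id, receipt_type, user_id)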
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 04393697c0..96a4714d82 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License.import opentracing +# limitations under the License. # NOTE @@ -150,10 +150,13 @@ Gotchas """ import contextlib +import inspect import logging import re from functools import wraps +from canonicaljson import json + from twisted.internet import defer from synapse.config import ConfigError @@ -173,36 +176,12 @@ except ImportError: logger = logging.getLogger(__name__) -class _DumTagNames(object): - """wrapper of opentracings tags. We need to have them if we - want to reference them without opentracing around. Clearly they - should never actually show up in a trace. `set_tags` overwrites - these with the correct ones.""" +# Block everything by default +# A regex which matches the server_names to expose traces for. +# None means 'block everything'. +_homeserver_whitelist = None - INVALID_TAG = "invalid-tag" - COMPONENT = INVALID_TAG - DATABASE_INSTANCE = INVALID_TAG - DATABASE_STATEMENT = INVALID_TAG - DATABASE_TYPE = INVALID_TAG - DATABASE_USER = INVALID_TAG - ERROR = INVALID_TAG - HTTP_METHOD = INVALID_TAG - HTTP_STATUS_CODE = INVALID_TAG - HTTP_URL = INVALID_TAG - MESSAGE_BUS_DESTINATION = INVALID_TAG - PEER_ADDRESS = INVALID_TAG - PEER_HOSTNAME = INVALID_TAG - PEER_HOST_IPV4 = INVALID_TAG - PEER_HOST_IPV6 = INVALID_TAG - PEER_PORT = INVALID_TAG - PEER_SERVICE = INVALID_TAG - SAMPLING_PRIORITY = INVALID_TAG - SERVICE = INVALID_TAG - SPAN_KIND = INVALID_TAG - SPAN_KIND_CONSUMER = INVALID_TAG - SPAN_KIND_PRODUCER = INVALID_TAG - SPAN_KIND_RPC_CLIENT = INVALID_TAG - SPAN_KIND_RPC_SERVER = INVALID_TAG +# Util methods def only_if_tracing(func): @@ -219,11 +198,13 @@ def only_if_tracing(func): return _only_if_tracing_inner -# A regex which matches the server_names to expose traces for. -# None means 'block everything'. -_homeserver_whitelist = None +@contextlib.contextmanager +def _noop_context_manager(*args, **kwargs): + """Does exactly what it says on the tin""" + yield -tags = _DumTagNames + +# Setup def init_tracer(config): @@ -260,13 +241,39 @@ def init_tracer(config): tags = opentracing.tags -@contextlib.contextmanager -def _noop_context_manager(*args, **kwargs): - """Does absolutely nothing really well. Can be entered and exited arbitrarily. 
- Good substitute for an opentracing scope.""" - yield +# Whitelisting +@only_if_tracing +def set_homeserver_whitelist(homeserver_whitelist): + """Sets the homeserver whitelist + + Args: + homeserver_whitelist (Iterable[str]): regex of whitelisted homeservers + """ + global _homeserver_whitelist + if homeserver_whitelist: + # Makes a single regex which accepts all passed in regexes in the list + _homeserver_whitelist = re.compile( + "({})".format(")|(".join(homeserver_whitelist)) + ) + + +@only_if_tracing +def whitelisted_homeserver(destination): + """Checks if a destination matches the whitelist + + Args: + destination (str) + """ + _homeserver_whitelist + if _homeserver_whitelist: + return _homeserver_whitelist.match(destination) + return False + + +# Start spans and scopes + # Could use kwargs but I want these to be explicit def start_active_span( operation_name, @@ -285,8 +292,10 @@ def start_active_span( Returns: scope (Scope) or noop_context_manager """ + if opentracing is None: return _noop_context_manager() + else: # We need to enter the scope here for the logcontext to become active return opentracing.tracer.start_active_span( @@ -300,63 +309,13 @@ def start_active_span( ) -@only_if_tracing -def close_active_span(): - """Closes the active span. This will close it's logcontext if the context - was made for the span""" - opentracing.tracer.scope_manager.active.__exit__(None, None, None) - - -@only_if_tracing -def set_tag(key, value): - """Set's a tag on the active span""" - opentracing.tracer.active_span.set_tag(key, value) - - -@only_if_tracing -def log_kv(key_values, timestamp=None): - """Log to the active span""" - opentracing.tracer.active_span.log_kv(key_values, timestamp) - - -# Note: we don't have a get baggage items because we're trying to hide all -# scope and span state from synapse. I think this method may also be useless -# as a result -@only_if_tracing -def set_baggage_item(key, value): - """Attach baggage to the active span""" - opentracing.tracer.active_span.set_baggage_item(key, value) - - -@only_if_tracing -def set_operation_name(operation_name): - """Sets the operation name of the active span""" - opentracing.tracer.active_span.set_operation_name(operation_name) - - -@only_if_tracing -def set_homeserver_whitelist(homeserver_whitelist): - """Sets the whitelist - - Args: - homeserver_whitelist (iterable of strings): regex of whitelisted homeservers - """ - global _homeserver_whitelist - if homeserver_whitelist: - # Makes a single regex which accepts all passed in regexes in the list - _homeserver_whitelist = re.compile( - "({})".format(")|(".join(homeserver_whitelist)) - ) - - -@only_if_tracing -def whitelisted_homeserver(destination): - """Checks if a destination matches the whitelist - Args: - destination (String)""" - if _homeserver_whitelist: - return _homeserver_whitelist.match(destination) - return False +def start_active_span_follows_from(operation_name, contexts): + if opentracing is None: + return _noop_context_manager() + else: + references = [opentracing.follows_from(context) for context in contexts] + scope = start_active_span(operation_name, references=references) + return scope def start_active_span_from_context( @@ -372,12 +331,16 @@ def start_active_span_from_context( Extracts a span context from Twisted Headers. args: headers (twisted.web.http_headers.Headers) + + For the other args see opentracing.tracer + returns: span_context (opentracing.span.SpanContext) """ # Twisted encodes the values as lists whereas opentracing doesn't. 
     # So, we take the first item in the list.
     # Also, twisted uses byte arrays while opentracing expects strings.
+
     if opentracing is None:
         return _noop_context_manager()
 
@@ -395,17 +358,90 @@ def start_active_span_from_context(
     )
 
 
+def start_active_span_from_edu(
+    edu_content,
+    operation_name,
+    references=[],
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """
+    Extracts a span context from an edu and uses it to start a new active span
+
+    Args:
+        edu_content (dict): an edu_content with a `context` field whose value is
+        canonical json for a dict which contains opentracing information.
+
+        For the other args see opentracing.tracer
+    """
+
+    if opentracing is None:
+        return _noop_context_manager()
+
+    carrier = json.loads(edu_content.get("context", "{}")).get("opentracing", {})
+    context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+    _references = [
+        opentracing.child_of(span_context_from_string(x))
+        for x in carrier.get("references", [])
+    ]
+
+    # For some reason jaeger decided not to support the visualization of multiple parent
+    # spans or explicitly show references. I include the span context as a tag here as
+    # an aid to people debugging but it's really not an ideal solution.
+
+    references += _references
+
+    scope = opentracing.tracer.start_active_span(
+        operation_name,
+        child_of=context,
+        references=references,
+        tags=tags,
+        start_time=start_time,
+        ignore_active_span=ignore_active_span,
+        finish_on_close=finish_on_close,
+    )
+
+    scope.span.set_tag("references", carrier.get("references", []))
+    return scope
+
+
+# Opentracing setters for tags, logs, etc
+
+
+@only_if_tracing
+def set_tag(key, value):
+    """Sets a tag on the active span"""
+    opentracing.tracer.active_span.set_tag(key, value)
+
+
+@only_if_tracing
+def log_kv(key_values, timestamp=None):
+    """Log to the active span"""
+    opentracing.tracer.active_span.log_kv(key_values, timestamp)
+
+
+@only_if_tracing
+def set_operation_name(operation_name):
+    """Sets the operation name of the active span"""
+    opentracing.tracer.active_span.set_operation_name(operation_name)
+
+
+# Injection and extraction
+
+
 @only_if_tracing
 def inject_active_span_twisted_headers(headers, destination):
     """
-    Injects a span context into twisted headers inplace
+    Injects a span context into twisted headers in-place
 
     Args:
         headers (twisted.web.http_headers.Headers)
         span (opentracing.Span)
 
     Returns:
-        Inplace modification of headers
+        In-place modification of headers
 
     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -437,7 +473,7 @@ def inject_active_span_byte_dict(headers, destination):
         span (opentracing.Span)
 
     Returns:
-        Inplace modification of headers
+        In-place modification of headers
 
     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -458,9 +494,190 @@ def inject_active_span_byte_dict(headers, destination):
         headers[key.encode()] = [value.encode()]
 
 
+@only_if_tracing
+def inject_active_span_text_map(carrier, destination=None):
+    """
+    Injects a span context into a dict
+
+    Args:
+        carrier (dict)
+        destination (str): the name of the remote server. The span context
+        will only be injected if the destination matches the homeserver_whitelist
+        or destination is None.
+
+    Returns:
+        In-place modification of carrier
+
+    Note:
+        The headers set by the tracer are custom to the tracer implementation which
+        should be unique enough that they don't interfere with any headers set by
+        synapse or twisted. If we're still using jaeger these headers would be those
+        here:
+        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
+    """
+
+    if destination and not whitelisted_homeserver(destination):
+        return
+
+    opentracing.tracer.inject(
+        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+    )
+
+
+def active_span_context_as_string():
+    """
+    Returns:
+        The active span context encoded as a string.
+    """
+    carrier = {}
+    if opentracing:
+        opentracing.tracer.inject(
+            opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+        )
+    return json.dumps(carrier)
+
+
+@only_if_tracing
+def span_context_from_string(carrier):
+    """
+    Returns:
+        The active span context decoded from a string.
+    """
+    carrier = json.loads(carrier)
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+@only_if_tracing
+def extract_text_map(carrier):
+    """
+    Wrapper method for opentracing's tracer.extract for TEXT_MAP.
+    Args:
+        carrier (dict): a dict possibly containing a span context.
+
+    Returns:
+        The active span context extracted from carrier.
+    """
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+# Tracing decorators
+
+
+def trace(func):
+    """
+    Decorator to trace a function.
+    Sets the operation name to the function's name.
+    """
+    if opentracing is None:
+        return func
+
+    @wraps(func)
+    def _trace_inner(self, *args, **kwargs):
+        if opentracing is None:
+            return func(self, *args, **kwargs)
+
+        scope = start_active_span(func.__name__)
+        scope.__enter__()
+
+        try:
+            result = func(self, *args, **kwargs)
+            if isinstance(result, defer.Deferred):
+
+                def call_back(result):
+                    scope.__exit__(None, None, None)
+                    return result
+
+                def err_back(result):
+                    scope.span.set_tag(tags.ERROR, True)
+                    scope.__exit__(None, None, None)
+                    return result
+
+                result.addCallbacks(call_back, err_back)
+
+            else:
+                scope.__exit__(None, None, None)
+
+            return result
+
+        except Exception as e:
+            scope.__exit__(type(e), None, e.__traceback__)
+            raise
+
+    return _trace_inner
+
+
+def trace_using_operation_name(operation_name):
+    """Decorator to trace a function. Explicitly sets the operation_name."""
+
+    def trace(func):
+        """
+        Decorator to trace a function.
+        Sets the operation name to the given operation_name.
+        """
+        if opentracing is None:
+            return func
+
+        @wraps(func)
+        def _trace_inner(self, *args, **kwargs):
+            if opentracing is None:
+                return func(self, *args, **kwargs)
+
+            scope = start_active_span(operation_name)
+            scope.__enter__()
+
+            try:
+                result = func(self, *args, **kwargs)
+                if isinstance(result, defer.Deferred):
+
+                    def call_back(result):
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    def err_back(result):
+                        scope.span.set_tag(tags.ERROR, True)
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    result.addCallbacks(call_back, err_back)
+                else:
+                    scope.__exit__(None, None, None)
+
+                return result
+
+            except Exception as e:
+                scope.__exit__(type(e), None, e.__traceback__)
+                raise
+
+        return _trace_inner
+
+    return trace
+
+
+def tag_args(func):
+    """
+    Tags all of the args to the active span.
+    """
+
+    if not opentracing:
+        return func
+
+    @wraps(func)
+    def _tag_args_inner(self, *args, **kwargs):
+        argspec = inspect.getargspec(func)
+        for i, arg in enumerate(argspec.args[1:]):
+            set_tag("ARG_" + arg, args[i])
+        set_tag("args", args[len(argspec.args) :])
+        set_tag("kwargs", kwargs)
+        return func(self, *args, **kwargs)
+
+    return _tag_args_inner
+
+
 def trace_servlet(servlet_name, func):
     """Decorator which traces a servlet. It starts a span with some servlet specific
     tags such as the servlet_name and request information"""
+    if not opentracing:
+        return func
 
     @wraps(func)
     @defer.inlineCallbacks
@@ -477,6 +694,44 @@ def trace_servlet(servlet_name, func):
             },
         ):
             result = yield defer.maybeDeferred(func, request, *args, **kwargs)
-            defer.returnValue(result)
+        defer.returnValue(result)
 
     return _trace_servlet_inner
+
+
+# Helper class
+
+
+class _DummyTagNames(object):
+    """wrapper of opentracing's tags. We need to have them if we
+    want to reference them without opentracing around. Clearly they
+    should never actually show up in a trace. `set_tags` overwrites
+    these with the correct ones."""
+
+    INVALID_TAG = "invalid-tag"
+    COMPONENT = INVALID_TAG
+    DATABASE_INSTANCE = INVALID_TAG
+    DATABASE_STATEMENT = INVALID_TAG
+    DATABASE_TYPE = INVALID_TAG
+    DATABASE_USER = INVALID_TAG
+    ERROR = INVALID_TAG
+    HTTP_METHOD = INVALID_TAG
+    HTTP_STATUS_CODE = INVALID_TAG
+    HTTP_URL = INVALID_TAG
+    MESSAGE_BUS_DESTINATION = INVALID_TAG
+    PEER_ADDRESS = INVALID_TAG
+    PEER_HOSTNAME = INVALID_TAG
+    PEER_HOST_IPV4 = INVALID_TAG
+    PEER_HOST_IPV6 = INVALID_TAG
+    PEER_PORT = INVALID_TAG
+    PEER_SERVICE = INVALID_TAG
+    SAMPLING_PRIORITY = INVALID_TAG
+    SERVICE = INVALID_TAG
+    SPAN_KIND = INVALID_TAG
+    SPAN_KIND_CONSUMER = INVALID_TAG
+    SPAN_KIND_PRODUCER = INVALID_TAG
+    SPAN_KIND_RPC_CLIENT = INVALID_TAG
+    SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
+tags = _DummyTagNames
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index 8c661302c9..4eed4f2338 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -131,7 +131,7 @@ class _LogContextScope(Scope):
     def close(self):
         if self.manager.active is not self:
-            logger.error("Tried to close a none active scope!")
+            logger.error("Tried to close a non-active scope!")
             return
 
         if self._finish_on_close:
From fadfde9aaaf37da6b3f8f4ca27a028f43cc8a3f3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 13:32:37 +0100
Subject: [PATCH 26/72] Newsfile

---
 changelog.d/5743.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5743.bugfix

diff --git a/changelog.d/5743.bugfix b/changelog.d/5743.bugfix
new file mode 100644
index 0000000000..a160e9945f
--- /dev/null
+++ b/changelog.d/5743.bugfix
@@ -0,0 +1 @@
+Log when we receive a receipt from a different origin.
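Before leaving the opentracing utilities: the decorators added in PATCH 25 return the undecorated function when the opentracing package is missing, so call sites can be annotated unconditionally. A sketch of how they might be applied; the handler class, its store attribute and both methods are invented for illustration, and note that a plain return works under inlineCallbacks on Python 3, which is exactly what the next patch converts the tree to:

    from twisted.internet import defer

    from synapse.logging.opentracing import (
        tag_args,
        trace,
        trace_using_operation_name,
    )


    class ExampleHandler(object):
        """Illustrative only; not a real Synapse handler."""

        @trace  # the span is named after the function, i.e. "get_thing"
        @defer.inlineCallbacks
        def get_thing(self, thing_id):
            # The scope stays open until the Deferred fires; a failure
            # sets the ERROR tag on the span via the errback.
            thing = yield self.store.lookup_thing(thing_id)
            return thing

        @trace_using_operation_name("ExampleHandler.store_thing")
        @tag_args  # records positional arguments as ARG_* tags on the span
        def store_thing(self, thing_id, value):
            # Synchronous functions are traced too; the scope is closed
            # when the function returns or raises.
            self.store.things[thing_id] = value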
From 4806651744616bf48abf408034ab9560e33f60ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 23:00:55 +1000 Subject: [PATCH 27/72] Replace returnValue with return (#5736) --- changelog.d/5736.misc | 1 + docs/log_contexts.rst | 2 +- synapse/api/auth.py | 44 ++++--- synapse/api/filtering.py | 2 +- synapse/app/frontend_proxy.py | 8 +- synapse/app/homeserver.py | 2 +- synapse/appservice/__init__.py | 28 ++--- synapse/appservice/api.py | 38 +++--- synapse/appservice/scheduler.py | 4 +- synapse/crypto/keyring.py | 14 +-- synapse/events/builder.py | 16 ++- synapse/events/snapshot.py | 28 ++--- synapse/events/third_party_rules.py | 8 +- synapse/events/utils.py | 4 +- synapse/federation/federation_base.py | 6 +- synapse/federation/federation_client.py | 46 ++++--- synapse/federation/federation_server.py | 77 +++++------- .../sender/per_destination_queue.py | 4 +- .../federation/sender/transaction_manager.py | 2 +- synapse/federation/transport/client.py | 30 ++--- synapse/groups/attestations.py | 2 +- synapse/groups/groups_server.py | 92 +++++++------- synapse/handlers/account_data.py | 4 +- synapse/handlers/account_validity.py | 6 +- synapse/handlers/acme.py | 2 +- synapse/handlers/admin.py | 10 +- synapse/handlers/appservice.py | 22 ++-- synapse/handlers/auth.py | 44 +++---- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 22 ++-- synapse/handlers/directory.py | 14 +-- synapse/handlers/e2e_keys.py | 10 +- synapse/handlers/e2e_room_keys.py | 8 +- synapse/handlers/events.py | 6 +- synapse/handlers/federation.py | 82 +++++++------ synapse/handlers/groups_local.py | 26 ++-- synapse/handlers/identity.py | 18 +-- synapse/handlers/initial_sync.py | 54 ++++----- synapse/handlers/message.py | 32 +++-- synapse/handlers/pagination.py | 14 +-- synapse/handlers/presence.py | 54 ++++----- synapse/handlers/profile.py | 18 +-- synapse/handlers/receipts.py | 14 +-- synapse/handlers/register.py | 16 +-- synapse/handlers/room.py | 16 +-- synapse/handlers/room_list.py | 10 +- synapse/handlers/room_member.py | 42 +++---- synapse/handlers/room_member_worker.py | 2 +- synapse/handlers/search.py | 14 +-- synapse/handlers/state_deltas.py | 8 +- synapse/handlers/stats.py | 6 +- synapse/handlers/sync.py | 112 ++++++++---------- synapse/handlers/typing.py | 4 +- synapse/handlers/user_directory.py | 2 +- synapse/http/client.py | 28 ++--- .../federation/matrix_federation_agent.py | 46 ++++--- synapse/http/federation/srv_resolver.py | 8 +- synapse/http/matrixfederationclient.py | 16 +-- synapse/logging/opentracing.py | 6 +- synapse/module_api/__init__.py | 2 +- synapse/notifier.py | 18 +-- synapse/push/bulk_push_rule_evaluator.py | 10 +- synapse/push/httppusher.py | 18 +-- synapse/push/mailer.py | 84 ++++++------- synapse/push/presentable_names.py | 25 ++-- synapse/push/push_tools.py | 4 +- synapse/push/pusherpool.py | 6 +- synapse/replication/http/_base.py | 2 +- synapse/replication/http/federation.py | 10 +- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 4 +- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 4 +- synapse/replication/tcp/streams/_base.py | 12 +- synapse/replication/tcp/streams/events.py | 2 +- synapse/rest/admin/__init__.py | 48 ++++---- synapse/rest/admin/server_notice_servlet.py | 2 +- synapse/rest/client/v1/directory.py | 18 ++- synapse/rest/client/v1/events.py | 6 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 14 +-- synapse/rest/client/v1/logout.py | 4 +- 
synapse/rest/client/v1/presence.py | 4 +- synapse/rest/client/v1/profile.py | 14 +-- synapse/rest/client/v1/push_rule.py | 10 +- synapse/rest/client/v1/pusher.py | 8 +- synapse/rest/client/v1/room.py | 46 +++---- synapse/rest/client/v1/voip.py | 20 ++-- synapse/rest/client/v2_alpha/account.py | 32 ++--- synapse/rest/client/v2_alpha/account_data.py | 8 +- .../rest/client/v2_alpha/account_validity.py | 4 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 10 +- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 64 +++++----- synapse/rest/client/v2_alpha/keys.py | 8 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 18 ++- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 38 +++--- synapse/rest/client/v2_alpha/relations.py | 8 +- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 14 +-- .../v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 48 ++++---- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- .../rest/client/v2_alpha/user_directory.py | 4 +- synapse/rest/media/v1/media_repository.py | 18 +-- synapse/rest/media/v1/media_storage.py | 12 +- synapse/rest/media/v1/preview_url_resource.py | 34 +++--- .../resource_limits_server_notices.py | 2 +- .../server_notices/server_notices_manager.py | 6 +- synapse/state/__init__.py | 38 +++--- synapse/state/v1.py | 8 +- synapse/state/v2.py | 26 ++-- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 14 +-- synapse/storage/account_data.py | 14 +-- synapse/storage/appservice.py | 14 +-- synapse/storage/background_updates.py | 20 ++-- synapse/storage/client_ips.py | 26 ++-- synapse/storage/deviceinbox.py | 10 +- synapse/storage/devices.py | 32 ++--- synapse/storage/directory.py | 10 +- synapse/storage/e2e_room_keys.py | 6 +- synapse/storage/end_to_end_keys.py | 8 +- synapse/storage/event_federation.py | 12 +- synapse/storage/event_push_actions.py | 16 +-- synapse/storage/events.py | 34 +++--- synapse/storage/events_bg_updates.py | 6 +- synapse/storage/events_worker.py | 20 ++-- synapse/storage/filtering.py | 4 +- synapse/storage/group_server.py | 38 +++--- synapse/storage/monthly_active_users.py | 2 +- synapse/storage/presence.py | 6 +- synapse/storage/profile.py | 12 +- synapse/storage/push_rule.py | 18 ++- synapse/storage/pusher.py | 10 +- synapse/storage/receipts.py | 36 +++--- synapse/storage/registration.py | 34 +++--- synapse/storage/relations.py | 4 +- synapse/storage/room.py | 10 +- synapse/storage/roommember.py | 42 ++++--- synapse/storage/search.py | 56 ++++----- synapse/storage/signatures.py | 2 +- synapse/storage/state.py | 54 ++++----- synapse/storage/stats.py | 16 +-- synapse/storage/stream.py | 44 ++++--- synapse/storage/tags.py | 12 +- synapse/storage/transactions.py | 4 +- synapse/storage/user_directory.py | 30 ++--- synapse/storage/user_erasure_store.py | 5 +- synapse/streams/events.py | 4 +- synapse/util/__init__.py | 2 +- synapse/util/async_helpers.py | 4 +- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/response_cache.py | 2 +- synapse/util/metrics.py | 2 +- synapse/util/retryutils.py | 16 ++- synapse/visibility.py | 8 +- tests/crypto/test_keyring.py | 6 +- 
tests/handlers/test_register.py | 2 +- .../test_matrix_federation_agent.py | 4 +- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_fedclient.py | 2 +- tests/rest/client/test_transactions.py | 2 +- tests/storage/test_background_update.py | 4 +- tests/storage/test_redaction.py | 4 +- tests/storage/test_roommember.py | 2 +- tests/storage/test_state.py | 2 +- tests/test_visibility.py | 6 +- tests/util/caches/test_descriptors.py | 8 +- tests/utils.py | 6 +- 177 files changed, 1359 insertions(+), 1513 deletions(-) create mode 100644 changelog.d/5736.misc mode change 100755 => 100644 synapse/app/homeserver.py diff --git a/changelog.d/5736.misc b/changelog.d/5736.misc new file mode 100644 index 0000000000..5713b8b32d --- /dev/null +++ b/changelog.d/5736.misc @@ -0,0 +1 @@ +Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index f5cd5de8ab..4502cd9454 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -148,7 +148,7 @@ call any other functions. d = more_stuff() result = yield d # also fine, of course - defer.returnValue(result) + return result def nonInlineCallbacksFun(): logger.debug("just a wrapper really") diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7ce6540bdd..351790cca4 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -128,7 +128,7 @@ class Auth(object): ) self._check_joined_room(member, user_id, room_id) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_user_was_in_room(self, room_id, user_id): @@ -156,13 +156,13 @@ class Auth(object): if forgot: raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_host_in_room(self, room_id, host): with Measure(self.clock, "check_host_in_room"): latest_event_ids = yield self.store.is_host_joined(room_id, host) - defer.returnValue(latest_event_ids) + return latest_event_ids def _check_joined_room(self, member, user_id, room_id): if not member or member.membership != Membership.JOIN: @@ -219,9 +219,7 @@ class Auth(object): device_id="dummy-device", # stubbed ) - defer.returnValue( - synapse.types.create_requester(user_id, app_service=app_service) - ) + return synapse.types.create_requester(user_id, app_service=app_service) user_info = yield self.get_user_by_access_token(access_token, rights) user = user_info["user"] @@ -262,10 +260,8 @@ class Auth(object): request.authenticated_entity = user.to_string() - defer.returnValue( - synapse.types.create_requester( - user, token_id, is_guest, device_id, app_service=app_service - ) + return synapse.types.create_requester( + user, token_id, is_guest, device_id, app_service=app_service ) except KeyError: raise MissingClientTokenError() @@ -276,25 +272,25 @@ class Auth(object): self.get_access_token_from_request(request) ) if app_service is None: - defer.returnValue((None, None)) + return (None, None) if app_service.ip_range_whitelist: ip_address = IPAddress(self.hs.get_ip_from_request(request)) if ip_address not in app_service.ip_range_whitelist: - defer.returnValue((None, None)) + return (None, None) if b"user_id" not in request.args: - defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) user_id = request.args[b"user_id"][0].decode("utf8") if app_service.sender == user_id: - defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) if not 
app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") if not (yield self.store.get_user_by_id(user_id)): raise AuthError(403, "Application service has not registered this user") - defer.returnValue((user_id, app_service)) + return (user_id, app_service) @defer.inlineCallbacks def get_user_by_access_token(self, token, rights="access"): @@ -330,7 +326,7 @@ class Auth(object): msg="Access token has expired", soft_logout=True ) - defer.returnValue(r) + return r # otherwise it needs to be a valid macaroon try: @@ -378,7 +374,7 @@ class Auth(object): } else: raise RuntimeError("Unknown rights setting %s", rights) - defer.returnValue(ret) + return ret except ( _InvalidMacaroonException, pymacaroons.exceptions.MacaroonException, @@ -506,7 +502,7 @@ class Auth(object): def _look_up_user_by_access_token(self, token): ret = yield self.store.get_user_by_access_token(token) if not ret: - defer.returnValue(None) + return None # we use ret.get() below because *lots* of unit tests stub out # get_user_by_access_token in a way where it only returns a couple of @@ -518,7 +514,7 @@ class Auth(object): "device_id": ret.get("device_id"), "valid_until_ms": ret.get("valid_until_ms"), } - defer.returnValue(user_info) + return user_info def get_appservice_by_req(self, request): token = self.get_access_token_from_request(request) @@ -543,7 +539,7 @@ class Auth(object): @defer.inlineCallbacks def compute_auth_events(self, event, current_state_ids, for_verification=False): if event.type == EventTypes.Create: - defer.returnValue([]) + return [] auth_ids = [] @@ -604,7 +600,7 @@ class Auth(object): if member_event.content["membership"] == Membership.JOIN: auth_ids.append(member_event.event_id) - defer.returnValue(auth_ids) + return auth_ids @defer.inlineCallbacks def check_can_change_room_list(self, room_id, user): @@ -618,7 +614,7 @@ class Auth(object): is_admin = yield self.is_server_admin(user) if is_admin: - defer.returnValue(True) + return True user_id = user.to_string() yield self.check_joined_room(room_id, user_id) @@ -712,7 +708,7 @@ class Auth(object): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) except AuthError: visibility = yield self.state.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -721,7 +717,7 @@ class Auth(object): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 9b3daca29b..9f06556bd2 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -132,7 +132,7 @@ class Filtering(object): @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): result = yield self.store.get_user_filter(user_localpart, filter_id) - defer.returnValue(FilterCollection(result)) + return FilterCollection(result) def add_user_filter(self, user_localpart, user_filter): self.check_valid_filter(user_filter) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 5b563c2778..e2822ca848 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -70,12 +70,12 @@ class PresenceStatusStubServlet(RestServlet): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_PUT(self, request, user_id): yield self.auth.get_user_by_req(request) - defer.returnValue((200, {})) + return (200, {}) class KeyUploadServlet(RestServlet): @@ -126,11 +126,11 @@ class KeyUploadServlet(RestServlet): self.main_uri + request.uri.decode("ascii"), body, headers=headers ) - defer.returnValue((200, result)) + return (200, result) else: # Just interested in counts. 
result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue((200, {"one_time_key_counts": result})) + return (200, {"one_time_key_counts": result}) class FrontendProxySlavedStore( diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py old mode 100755 new mode 100644 index 34c3f5ee99..7d6b51b5bc --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -406,7 +406,7 @@ def setup(config_options): if provision: yield acme.provision_certificate() - defer.returnValue(provision) + return provision @defer.inlineCallbacks def reprovision_acme(): diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index b26a31dd54..33b3579425 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -175,21 +175,21 @@ class ApplicationService(object): @defer.inlineCallbacks def _matches_user(self, event, store): if not event: - defer.returnValue(False) + return False if self.is_interested_in_user(event.sender): - defer.returnValue(True) + return True # also check m.room.member state key if event.type == EventTypes.Member and self.is_interested_in_user( event.state_key ): - defer.returnValue(True) + return True if not store: - defer.returnValue(False) + return False does_match = yield self._matches_user_in_member_list(event.room_id, store) - defer.returnValue(does_match) + return does_match @cachedInlineCallbacks(num_args=1, cache_context=True) def _matches_user_in_member_list(self, room_id, store, cache_context): @@ -200,8 +200,8 @@ class ApplicationService(object): # check joined member events for user_id in member_list: if self.is_interested_in_user(user_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False def _matches_room_id(self, event): if hasattr(event, "room_id"): @@ -211,13 +211,13 @@ class ApplicationService(object): @defer.inlineCallbacks def _matches_aliases(self, event, store): if not store or not event: - defer.returnValue(False) + return False alias_list = yield store.get_aliases_for_room(event.room_id) for alias in alias_list: if self.is_interested_in_alias(alias): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def is_interested(self, event, store=None): @@ -231,15 +231,15 @@ class ApplicationService(object): """ # Do cheap checks first if self._matches_room_id(event): - defer.returnValue(True) + return True if (yield self._matches_aliases(event, store)): - defer.returnValue(True) + return True if (yield self._matches_user(event, store)): - defer.returnValue(True) + return True - defer.returnValue(False) + return False def is_interested_in_user(self, user_id): return ( diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 571881775b..007ca75a94 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -97,40 +97,40 @@ class ApplicationServiceApi(SimpleHttpClient): @defer.inlineCallbacks def query_user(self, service, user_id): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: if e.code == 404: - defer.returnValue(False) + return False return logger.warning("query_user to %s received %s", uri, e.code) except Exception as ex: logger.warning("query_user to %s threw exception %s", uri, 
ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_alias(self, service, alias): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: logger.warning("query_alias to %s received %s", uri, e.code) if e.code == 404: - defer.returnValue(False) + return False return except Exception as ex: logger.warning("query_alias to %s threw exception %s", uri, ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_3pe(self, service, kind, protocol, fields): @@ -141,7 +141,7 @@ class ApplicationServiceApi(SimpleHttpClient): else: raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind) if service.url is None: - defer.returnValue([]) + return [] uri = "%s%s/thirdparty/%s/%s" % ( service.url, @@ -155,7 +155,7 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning( "query_3pe to %s returned an invalid response %r", uri, response ) - defer.returnValue([]) + return [] ret = [] for r in response: @@ -166,14 +166,14 @@ class ApplicationServiceApi(SimpleHttpClient): "query_3pe to %s returned an invalid result %r", uri, r ) - defer.returnValue(ret) + return ret except Exception as ex: logger.warning("query_3pe to %s threw exception %s", uri, ex) - defer.returnValue([]) + return [] def get_3pe_protocol(self, service, protocol): if service.url is None: - defer.returnValue({}) + return {} @defer.inlineCallbacks def _get(): @@ -189,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient): logger.warning( "query_3pe_protocol to %s did not return a" " valid result", uri ) - defer.returnValue(None) + return None for instance in info.get("instances", []): network_id = instance.get("network_id", None) @@ -198,10 +198,10 @@ class ApplicationServiceApi(SimpleHttpClient): service.id, network_id ).to_string() - defer.returnValue(info) + return info except Exception as ex: logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex) - defer.returnValue(None) + return None key = (service.id, protocol) return self.protocol_meta_cache.wrap(key, _get) @@ -209,7 +209,7 @@ class ApplicationServiceApi(SimpleHttpClient): @defer.inlineCallbacks def push_bulk(self, service, events, txn_id=None): if service.url is None: - defer.returnValue(True) + return True events = self._serialize(events) @@ -229,14 +229,14 @@ class ApplicationServiceApi(SimpleHttpClient): ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) - defer.returnValue(True) + return True return except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) except Exception as ex: logger.warning("push_bulk to %s threw exception %s", uri, ex) failed_transactions_counter.labels(service.id).inc() - defer.returnValue(False) + return False def _serialize(self, events): time_now = self.clock.time_msec() diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index e5b36494f5..42a350bff8 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -193,7 +193,7 @@ class _TransactionController(object): @defer.inlineCallbacks def _is_service_up(self, service): state = yield self.store.get_appservice_state(service) - defer.returnValue(state == ApplicationServiceState.UP or state is 
None) + return state == ApplicationServiceState.UP or state is None class _Recoverer(object): @@ -208,7 +208,7 @@ class _Recoverer(object): r.service.id, ) r.recover() - defer.returnValue(recoverers) + return recoverers def __init__(self, clock, store, as_api, service, callback): self.clock = clock diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e8bb420ad1..6c3e885e72 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -462,7 +462,7 @@ class StoreKeyFetcher(KeyFetcher): keys = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key - defer.returnValue(keys) + return keys class BaseV2KeyFetcher(object): @@ -566,7 +566,7 @@ class BaseV2KeyFetcher(object): ).addErrback(unwrapFirstError) ) - defer.returnValue(verify_keys) + return verify_keys class PerspectivesKeyFetcher(BaseV2KeyFetcher): @@ -588,7 +588,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): result = yield self.get_server_verify_key_v2_indirect( keys_to_fetch, key_server ) - defer.returnValue(result) + return result except KeyLookupError as e: logger.warning( "Key lookup failed from %r: %s", key_server.server_name, e @@ -601,7 +601,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): str(e), ) - defer.returnValue({}) + return {} results = yield make_deferred_yieldable( defer.gatherResults( @@ -615,7 +615,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): for server_name, keys in result.items(): union_of_keys.setdefault(server_name, {}).update(keys) - defer.returnValue(union_of_keys) + return union_of_keys @defer.inlineCallbacks def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server): @@ -701,7 +701,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): perspective_name, time_now_ms, added_keys ) - defer.returnValue(keys) + return keys def _validate_perspectives_response(self, key_server, response): """Optionally check the signature on the result of a /key/query request @@ -843,7 +843,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): ) keys.update(response_keys) - defer.returnValue(keys) + return keys @defer.inlineCallbacks diff --git a/synapse/events/builder.py b/synapse/events/builder.py index db011e0407..3997751337 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -144,15 +144,13 @@ class EventBuilder(object): if self._origin_server_ts is not None: event_dict["origin_server_ts"] = self._origin_server_ts - defer.returnValue( - create_local_event_from_event_dict( - clock=self._clock, - hostname=self._hostname, - signing_key=self._signing_key, - format_version=self.format_version, - event_dict=event_dict, - internal_metadata_dict=self.internal_metadata.get_dict(), - ) + return create_local_event_from_event_dict( + clock=self._clock, + hostname=self._hostname, + signing_key=self._signing_key, + format_version=self.format_version, + event_dict=event_dict, + internal_metadata_dict=self.internal_metadata.get_dict(), ) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a9545e6c1b..acbcbeeced 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -133,19 +133,17 @@ class EventContext(object): else: prev_state_id = None - defer.returnValue( - { - "prev_state_id": prev_state_id, - "event_type": event.type, - "event_state_key": event.state_key if event.is_state() else None, - "state_group": self.state_group, - "rejected": self.rejected, - "prev_group": self.prev_group, - "delta_ids": _encode_state_dict(self.delta_ids), - "prev_state_events": self.prev_state_events, - 
"app_service_id": self.app_service.id if self.app_service else None, - } - ) + return { + "prev_state_id": prev_state_id, + "event_type": event.type, + "event_state_key": event.state_key if event.is_state() else None, + "state_group": self.state_group, + "rejected": self.rejected, + "prev_group": self.prev_group, + "delta_ids": _encode_state_dict(self.delta_ids), + "prev_state_events": self.prev_state_events, + "app_service_id": self.app_service.id if self.app_service else None, + } @staticmethod def deserialize(store, input): @@ -202,7 +200,7 @@ class EventContext(object): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._current_state_ids) + return self._current_state_ids @defer.inlineCallbacks def get_prev_state_ids(self, store): @@ -222,7 +220,7 @@ class EventContext(object): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._prev_state_ids) + return self._prev_state_ids def get_cached_current_state_ids(self): """Gets the current state IDs if we have them already cached. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 8f5d95696b..714a9b1579 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -51,7 +51,7 @@ class ThirdPartyEventRules(object): defer.Deferred[bool]: True if the event should be allowed, False if not. """ if self.third_party_rules is None: - defer.returnValue(True) + return True prev_state_ids = yield context.get_prev_state_ids(self.store) @@ -61,7 +61,7 @@ class ThirdPartyEventRules(object): state_events[key] = yield self.store.get_event(event_id, allow_none=True) ret = yield self.third_party_rules.check_event_allowed(event, state_events) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_create_room(self, requester, config, is_requester_admin): @@ -98,7 +98,7 @@ class ThirdPartyEventRules(object): """ if self.third_party_rules is None: - defer.returnValue(True) + return True state_ids = yield self.store.get_filtered_current_state_ids(room_id) room_state_events = yield self.store.get_events(state_ids.values()) @@ -110,4 +110,4 @@ class ThirdPartyEventRules(object): ret = yield self.third_party_rules.check_threepid_can_be_invited( medium, address, state_events ) - defer.returnValue(ret) + return ret diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9487a886f5..07d1c5bcf0 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -360,7 +360,7 @@ class EventClientSerializer(object): """ # To handle the case of presence events and the like if not isinstance(event, EventBase): - defer.returnValue(event) + return event event_id = event.event_id serialized_event = serialize_event(event, time_now, **kwargs) @@ -406,7 +406,7 @@ class EventClientSerializer(object): "sender": edit.sender, } - defer.returnValue(serialized_event) + return serialized_event def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. 
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index f7bb806ae7..5a1e23a145 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -106,7 +106,7 @@ class FederationBase(object): "Failed to find copy of %s with valid signature", pdu.event_id ) - defer.returnValue(res) + return res handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] @@ -116,9 +116,9 @@ class FederationBase(object): ).addErrback(unwrapFirstError) if include_none: - defer.returnValue(valid_pdus) + return valid_pdus else: - defer.returnValue([p for p in valid_pdus if p]) + return [p for p in valid_pdus if p] def _check_sigs_and_hash(self, room_version, pdu): return make_deferred_yieldable( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3cb4b94420..25ed1257f1 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -213,7 +213,7 @@ class FederationClient(FederationBase): ).addErrback(unwrapFirstError) ) - defer.returnValue(pdus) + return pdus @defer.inlineCallbacks @log_function @@ -245,7 +245,7 @@ class FederationClient(FederationBase): ev = self._get_pdu_cache.get(event_id) if ev: - defer.returnValue(ev) + return ev pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -307,7 +307,7 @@ class FederationClient(FederationBase): if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu - defer.returnValue(signed_pdu) + return signed_pdu @defer.inlineCallbacks @log_function @@ -355,7 +355,7 @@ class FederationClient(FederationBase): auth_chain.sort(key=lambda e: e.depth) - defer.returnValue((pdus, auth_chain)) + return (pdus, auth_chain) except HttpResponseException as e: if e.code == 400 or e.code == 404: logger.info("Failed to use get_room_state_ids API, falling back") @@ -404,7 +404,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue((signed_pdus, signed_auth)) + return (signed_pdus, signed_auth) @defer.inlineCallbacks def get_events_from_store_or_dest(self, destination, room_id, event_ids): @@ -429,7 +429,7 @@ class FederationClient(FederationBase): missing_events.discard(k) if not missing_events: - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -465,7 +465,7 @@ class FederationClient(FederationBase): # We removed all events we successfully fetched from `batch` failed_to_fetch.update(batch) - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) @defer.inlineCallbacks @log_function @@ -485,7 +485,7 @@ class FederationClient(FederationBase): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue(signed_auth) + return signed_auth @defer.inlineCallbacks def _try_destination_list(self, description, destinations, callback): @@ -521,7 +521,7 @@ class FederationClient(FederationBase): try: res = yield callback(destination) - defer.returnValue(res) + return res except InvalidResponseError as e: logger.warn("Failed to %s via %s: %s", description, destination, e) except HttpResponseException as e: @@ -615,7 +615,7 @@ class FederationClient(FederationBase): event_dict=pdu_dict, ) - defer.returnValue((destination, ev, event_format)) + return (destination, ev, event_format) return self._try_destination_list( "make_" + membership, destinations, send_request @@ 
-728,13 +728,11 @@ class FederationClient(FederationBase): check_authchain_validity(signed_auth) - defer.returnValue( - { - "state": signed_state, - "auth_chain": signed_auth, - "origin": destination, - } - ) + return { + "state": signed_state, + "auth_chain": signed_auth, + "origin": destination, + } return self._try_destination_list("send_join", destinations, send_request) @@ -758,7 +756,7 @@ class FederationClient(FederationBase): # FIXME: We should handle signature failures more gracefully. - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def _do_send_invite(self, destination, pdu, room_version): @@ -786,7 +784,7 @@ class FederationClient(FederationBase): "invite_room_state": pdu.unsigned.get("invite_room_state", []), }, ) - defer.returnValue(content) + return content except HttpResponseException as e: if e.code in [400, 404]: err = e.to_synapse_error() @@ -821,7 +819,7 @@ class FederationClient(FederationBase): event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - defer.returnValue(content) + return content def send_leave(self, destinations, pdu): """Sends a leave event to one of a list of homeservers. @@ -856,7 +854,7 @@ class FederationClient(FederationBase): ) logger.debug("Got content: %s", content) - defer.returnValue(None) + return None return self._try_destination_list("send_leave", destinations, send_request) @@ -917,7 +915,7 @@ class FederationClient(FederationBase): "missing": content.get("missing", []), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_missing_events( @@ -974,7 +972,7 @@ class FederationClient(FederationBase): # get_missing_events signed_events = [] - defer.returnValue(signed_events) + return signed_events @defer.inlineCallbacks def forward_third_party_invite(self, destinations, room_id, event_dict): @@ -986,7 +984,7 @@ class FederationClient(FederationBase): yield self.transport_layer.exchange_third_party_invite( destination=destination, room_id=room_id, event_dict=event_dict ) - defer.returnValue(None) + return None except CodeMessageException: raise except Exception as e: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8c0a18b120..b4b9a05ca6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,7 +99,7 @@ class FederationServer(FederationBase): res = self._transaction_from_pdus(pdus).get_dict() - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks @log_function @@ -126,7 +126,7 @@ class FederationServer(FederationBase): origin, transaction, request_time ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _handle_incoming_transaction(self, origin, transaction, request_time): @@ -147,8 +147,7 @@ class FederationServer(FederationBase): "[%s] We've already responded to this request", transaction.transaction_id, ) - defer.returnValue(response) - return + return response logger.debug("[%s] Transaction is new", transaction.transaction_id) @@ -163,7 +162,7 @@ class FederationServer(FederationBase): yield self.transaction_actions.set_response( origin, transaction, 400, response ) - defer.returnValue((400, response)) + return (400, response) received_pdus_counter.inc(len(transaction.pdus)) @@ -265,7 +264,7 @@ class FederationServer(FederationBase): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response(origin, transaction, 200, response) - defer.returnValue((200, response)) + return (200, response) @defer.inlineCallbacks def received_edu(self, 
origin, edu_type, content): @@ -298,7 +297,7 @@ class FederationServer(FederationBase): event_id, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_state_ids_request(self, origin, room_id, event_id): @@ -315,9 +314,7 @@ class FederationServer(FederationBase): state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids) - defer.returnValue( - (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) - ) + return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) @defer.inlineCallbacks def _on_context_state_request_compute(self, room_id, event_id): @@ -336,12 +333,10 @@ class FederationServer(FederationBase): ) ) - defer.returnValue( - { - "pdus": [pdu.get_pdu_json() for pdu in pdus], - "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], - } - ) + return { + "pdus": [pdu.get_pdu_json() for pdu in pdus], + "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], + } @defer.inlineCallbacks @log_function @@ -349,15 +344,15 @@ class FederationServer(FederationBase): pdu = yield self.handler.get_persisted_pdu(origin, event_id) if pdu: - defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict())) + return (200, self._transaction_from_pdus([pdu]).get_dict()) else: - defer.returnValue((404, "")) + return (404, "") @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_make_join_request(self, origin, room_id, user_id, supported_versions): @@ -371,9 +366,7 @@ class FederationServer(FederationBase): pdu = yield self.handler.on_make_join_request(room_id, user_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_invite_request(self, origin, content, room_version): @@ -391,7 +384,7 @@ class FederationServer(FederationBase): yield self.check_server_matches_acl(origin_host, pdu.room_id) ret_pdu = yield self.handler.on_invite_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)}) + return {"event": ret_pdu.get_pdu_json(time_now)} @defer.inlineCallbacks def on_send_join_request(self, origin, content, room_id): @@ -407,16 +400,14 @@ class FederationServer(FederationBase): logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) res_pdus = yield self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue( - ( - 200, - { - "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], - "auth_chain": [ - p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] - ], - }, - ) + return ( + 200, + { + "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], + "auth_chain": [ + p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] + ], + }, ) @defer.inlineCallbacks @@ -428,9 +419,7 @@ class FederationServer(FederationBase): room_version = yield self.store.get_room_version(room_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_send_leave_request(self, origin, content, room_id): 
@@ -445,7 +434,7 @@ class FederationServer(FederationBase): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) yield self.handler.on_send_leave_request(origin, pdu) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): @@ -456,7 +445,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() auth_pdus = yield self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_query_auth_request(self, origin, content, room_id, event_id): @@ -509,7 +498,7 @@ class FederationServer(FederationBase): "missing": ret.get("missing", []), } - defer.returnValue((200, send_content)) + return (200, send_content) @log_function def on_query_client_keys(self, origin, content): @@ -548,7 +537,7 @@ class FederationServer(FederationBase): ), ) - defer.returnValue({"one_time_keys": json_result}) + return {"one_time_keys": json_result} @defer.inlineCallbacks @log_function @@ -580,9 +569,7 @@ class FederationServer(FederationBase): time_now = self._clock.time_msec() - defer.returnValue( - {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} - ) + return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} @log_function def on_openid_userinfo(self, token): @@ -676,14 +663,14 @@ class FederationServer(FederationBase): ret = yield self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): ret = yield self.handler.on_exchange_third_party_invite_request( origin, room_id, event_dict ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def check_server_matches_acl(self, server_name, room_id): diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 9aab12c0d3..fad980b893 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -374,7 +374,7 @@ class PerDestinationQueue(object): assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" - defer.returnValue((edus, now_stream_id)) + return (edus, now_stream_id) @defer.inlineCallbacks def _get_to_device_message_edus(self, limit): @@ -393,4 +393,4 @@ class PerDestinationQueue(object): for content in contents ] - defer.returnValue((edus, stream_id)) + return (edus, stream_id) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 0460a8c4ac..52706302f2 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -133,4 +133,4 @@ class TransactionManager(object): ) success = False - defer.returnValue(success) + return success diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1aae9ec9e7..2a6709ff48 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -183,7 +183,7 @@ class TransportLayerClient(object): try_trailing_slash_on_400=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -201,7 +201,7 @@ class TransportLayerClient(object): ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function 
@@ -259,7 +259,7 @@ class TransportLayerClient(object): ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -270,7 +270,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -288,7 +288,7 @@ class TransportLayerClient(object): ignore_backoff=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -299,7 +299,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -310,7 +310,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -339,7 +339,7 @@ class TransportLayerClient(object): destination=remote_server, path=path, args=args, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -350,7 +350,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=event_dict ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -359,7 +359,7 @@ class TransportLayerClient(object): content = yield self.client.get_json(destination=destination, path=path) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -370,7 +370,7 @@ class TransportLayerClient(object): destination=destination, path=path, data=content ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -402,7 +402,7 @@ class TransportLayerClient(object): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -426,7 +426,7 @@ class TransportLayerClient(object): content = yield self.client.get_json( destination=destination, path=path, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -460,7 +460,7 @@ class TransportLayerClient(object): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -488,7 +488,7 @@ class TransportLayerClient(object): timeout=timeout, ) - defer.returnValue(content) + return content @log_function def get_group_profile(self, destination, group_id, requester_user_id): diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index f497711133..dfd7ae041b 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -157,7 +157,7 @@ class GroupAttestionRenewer(object): yield self.store.update_remote_attestion(group_id, user_id, attestation) - defer.returnValue({}) + return {} def _start_renew_attestations(self): return run_as_background_process("renew_attestations", self._renew_attestations) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 168c9e3f84..d50e691436 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -85,7 +85,7 @@ class GroupsServerHandler(object): if not is_admin: raise SynapseError(403, "User is not admin in group") - defer.returnValue(group) + return group @defer.inlineCallbacks 
def get_group_summary(self, group_id, requester_user_id): @@ -151,22 +151,20 @@ class GroupsServerHandler(object): group_id, requester_user_id ) - defer.returnValue( - { - "profile": profile, - "users_section": { - "users": users, - "roles": roles, - "total_user_count_estimate": 0, # TODO - }, - "rooms_section": { - "rooms": rooms, - "categories": categories, - "total_room_count_estimate": 0, # TODO - }, - "user": membership_info, - } - ) + return { + "profile": profile, + "users_section": { + "users": users, + "roles": roles, + "total_user_count_estimate": 0, # TODO + }, + "rooms_section": { + "rooms": rooms, + "categories": categories, + "total_room_count_estimate": 0, # TODO + }, + "user": membership_info, + } @defer.inlineCallbacks def update_group_summary_room( @@ -192,7 +190,7 @@ class GroupsServerHandler(object): is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_room( @@ -208,7 +206,7 @@ class GroupsServerHandler(object): group_id=group_id, room_id=room_id, category_id=category_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def set_group_join_policy(self, group_id, requester_user_id, content): @@ -228,7 +226,7 @@ class GroupsServerHandler(object): yield self.store.set_group_join_policy(group_id, join_policy=join_policy) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): @@ -237,7 +235,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) categories = yield self.store.get_group_categories(group_id=group_id) - defer.returnValue({"categories": categories}) + return {"categories": categories} @defer.inlineCallbacks def get_group_category(self, group_id, requester_user_id, category_id): @@ -249,7 +247,7 @@ class GroupsServerHandler(object): group_id=group_id, category_id=category_id ) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_category(self, group_id, requester_user_id, category_id, content): @@ -269,7 +267,7 @@ class GroupsServerHandler(object): profile=profile, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_category(self, group_id, requester_user_id, category_id): @@ -283,7 +281,7 @@ class GroupsServerHandler(object): group_id=group_id, category_id=category_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_roles(self, group_id, requester_user_id): @@ -292,7 +290,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) roles = yield self.store.get_group_roles(group_id=group_id) - defer.returnValue({"roles": roles}) + return {"roles": roles} @defer.inlineCallbacks def get_group_role(self, group_id, requester_user_id, role_id): @@ -301,7 +299,7 @@ class GroupsServerHandler(object): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) res = yield self.store.get_group_role(group_id=group_id, role_id=role_id) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_role(self, group_id, requester_user_id, role_id, content): @@ -319,7 +317,7 @@ class GroupsServerHandler(object): group_id=group_id, role_id=role_id, is_public=is_public, profile=profile ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_role(self, group_id, requester_user_id, role_id): @@ -331,7 +329,7 @@ class GroupsServerHandler(object): yield self.store.remove_group_role(group_id=group_id, 
role_id=role_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_group_summary_user( @@ -355,7 +353,7 @@ class GroupsServerHandler(object): is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): @@ -369,7 +367,7 @@ class GroupsServerHandler(object): group_id=group_id, user_id=user_id, role_id=role_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): @@ -391,7 +389,7 @@ class GroupsServerHandler(object): group_description = {key: group[key] for key in cols} group_description["is_openly_joinable"] = group["join_policy"] == "open" - defer.returnValue(group_description) + return group_description else: raise SynapseError(404, "Unknown group") @@ -461,9 +459,7 @@ class GroupsServerHandler(object): # TODO: If admin add lists of users whose attestations have timed out - defer.returnValue( - {"chunk": chunk, "total_user_count_estimate": len(user_results)} - ) + return {"chunk": chunk, "total_user_count_estimate": len(user_results)} @defer.inlineCallbacks def get_invited_users_in_group(self, group_id, requester_user_id): @@ -494,9 +490,7 @@ class GroupsServerHandler(object): logger.warn("Error getting profile for %s: %s", user_id, e) user_profiles.append(user_profile) - defer.returnValue( - {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} - ) + return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): @@ -533,9 +527,7 @@ class GroupsServerHandler(object): chunk.sort(key=lambda e: -e["num_joined_members"]) - defer.returnValue( - {"chunk": chunk, "total_room_count_estimate": len(room_results)} - ) + return {"chunk": chunk, "total_room_count_estimate": len(room_results)} @defer.inlineCallbacks def add_room_to_group(self, group_id, requester_user_id, room_id, content): @@ -551,7 +543,7 @@ class GroupsServerHandler(object): yield self.store.add_room_to_group(group_id, room_id, is_public=is_public) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_room_in_group( @@ -574,7 +566,7 @@ class GroupsServerHandler(object): else: raise SynapseError(400, "Uknown config option") - defer.returnValue({}) + return {} @defer.inlineCallbacks def remove_room_from_group(self, group_id, requester_user_id, room_id): @@ -586,7 +578,7 @@ class GroupsServerHandler(object): yield self.store.remove_room_from_group(group_id, room_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite_to_group(self, group_id, user_id, requester_user_id, content): @@ -644,9 +636,9 @@ class GroupsServerHandler(object): ) elif res["state"] == "invite": yield self.store.add_group_invite(group_id, user_id) - defer.returnValue({"state": "invite"}) + return {"state": "invite"} elif res["state"] == "reject": - defer.returnValue({"state": "reject"}) + return {"state": "reject"} else: raise SynapseError(502, "Unknown state returned by HS") @@ -679,7 +671,7 @@ class GroupsServerHandler(object): remote_attestation=remote_attestation, ) - defer.returnValue(local_attestation) + return local_attestation @defer.inlineCallbacks def accept_invite(self, group_id, requester_user_id, content): @@ -699,7 +691,7 @@ class GroupsServerHandler(object): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + 
return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def join_group(self, group_id, requester_user_id, content): @@ -716,7 +708,7 @@ class GroupsServerHandler(object): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def knock(self, group_id, requester_user_id, content): @@ -769,7 +761,7 @@ class GroupsServerHandler(object): if not self.hs.is_mine_id(user_id): yield self.store.maybe_delete_remote_profile_cache(user_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def create_group(self, group_id, requester_user_id, content): @@ -845,7 +837,7 @@ class GroupsServerHandler(object): avatar_url=user_profile.get("avatar_url"), ) - defer.returnValue({"group_id": group_id}) + return {"group_id": group_id} @defer.inlineCallbacks def delete_group(self, group_id, requester_user_id): diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index e62e6cab77..8acd9f9a83 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -51,8 +51,8 @@ class AccountDataEventSource(object): {"type": account_data_type, "content": content, "room_id": room_id} ) - defer.returnValue((results, current_stream_id)) + return (results, current_stream_id) @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): - defer.returnValue(([], config.to_id)) + return ([], config.to_id) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 1f1708ba7d..930204e2d0 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -193,7 +193,7 @@ class AccountValidityHandler(object): if threepid["medium"] == "email": addresses.append(threepid["address"]) - defer.returnValue(addresses) + return addresses @defer.inlineCallbacks def _get_renewal_token(self, user_id): @@ -214,7 +214,7 @@ class AccountValidityHandler(object): try: renewal_token = stringutils.random_string(32) yield self.store.set_renewal_token_for_user(user_id, renewal_token) - defer.returnValue(renewal_token) + return renewal_token except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") @@ -254,4 +254,4 @@ class AccountValidityHandler(object): user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) - defer.returnValue(expiration_ts) + return expiration_ts diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index fbef2f3d38..46ac73106d 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -100,4 +100,4 @@ class AcmeHandler(object): logger.exception("Failed saving!") raise - defer.returnValue(True) + return True diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e8a651e231..2f22f56ca4 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -49,7 +49,7 @@ class AdminHandler(BaseHandler): "devices": {"": {"sessions": [{"connections": connections}]}}, } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users(self): @@ -61,7 +61,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.get_users() - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users_paginate(self, order, start, limit): @@ -78,7 +78,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.get_users_paginate(order, start, limit) - defer.returnValue(ret) + 
return ret @defer.inlineCallbacks def search_users(self, term): @@ -92,7 +92,7 @@ class AdminHandler(BaseHandler): """ ret = yield self.store.search_users(term) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def export_user_data(self, user_id, writer): @@ -225,7 +225,7 @@ class AdminHandler(BaseHandler): state = yield self.store.get_state_for_event(event_id) writer.write_state(room_id, event_id, state) - defer.returnValue(writer.finished()) + return writer.finished() class ExfiltrationWriter(object): diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 8f089f0e33..d1a51df6f9 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -167,8 +167,8 @@ class ApplicationServicesHandler(object): for user_service in user_query_services: is_known_user = yield self.appservice_api.query_user(user_service, user_id) if is_known_user: - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def query_room_alias_exists(self, room_alias): @@ -192,7 +192,7 @@ class ApplicationServicesHandler(object): if is_known_alias: # the alias exists now so don't query more ASes. result = yield self.store.get_association_from_room_alias(room_alias) - defer.returnValue(result) + return result @defer.inlineCallbacks def query_3pe(self, kind, protocol, fields): @@ -215,7 +215,7 @@ class ApplicationServicesHandler(object): if success: ret.extend(result) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_3pe_protocols(self, only_protocol=None): @@ -254,7 +254,7 @@ class ApplicationServicesHandler(object): for p in protocols.keys(): protocols[p] = _merge_instances(protocols[p]) - defer.returnValue(protocols) + return protocols @defer.inlineCallbacks def _get_services_for_event(self, event): @@ -276,7 +276,7 @@ class ApplicationServicesHandler(object): if (yield s.is_interested(event, self.store)): interested_list.append(s) - defer.returnValue(interested_list) + return interested_list def _get_services_for_user(self, user_id): services = self.store.get_app_services() @@ -293,23 +293,23 @@ class ApplicationServicesHandler(object): if not self.is_mine_id(user_id): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. - defer.returnValue(False) + return False return user_info = yield self.store.get_user_by_id(user_id) if user_info: - defer.returnValue(False) + return False return # user not found; could be the AS though, so check. 
services = self.store.get_app_services() service_list = [s for s in services if s.sender == user_id] - defer.returnValue(len(service_list) == 0) + return len(service_list) == 0 @defer.inlineCallbacks def _check_user_exists(self, user_id): unknown_user = yield self._is_unknown_user(user_id) if unknown_user: exists = yield self.query_user_exists(user_id) - defer.returnValue(exists) - defer.returnValue(True) + return exists + return True diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d4d6574975..05be5b7c48 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -155,7 +155,7 @@ class AuthHandler(BaseHandler): if user_id != requester.user.to_string(): raise AuthError(403, "Invalid auth") - defer.returnValue(params) + return params @defer.inlineCallbacks def check_auth(self, flows, clientdict, clientip, password_servlet=False): @@ -280,7 +280,7 @@ class AuthHandler(BaseHandler): creds, list(clientdict), ) - defer.returnValue((creds, clientdict, session["id"])) + return (creds, clientdict, session["id"]) ret = self._auth_dict_for_flows(flows, session) ret["completed"] = list(creds) @@ -307,8 +307,8 @@ class AuthHandler(BaseHandler): if result: creds[stagetype] = result self._save_session(sess) - defer.returnValue(True) - defer.returnValue(False) + return True + return False def get_session_id(self, clientdict): """ @@ -379,7 +379,7 @@ class AuthHandler(BaseHandler): res = yield checker( authdict, clientip=clientip, password_servlet=password_servlet ) - defer.returnValue(res) + return res # build a v1-login-style dict out of the authdict and fall back to the # v1 code @@ -389,7 +389,7 @@ class AuthHandler(BaseHandler): raise SynapseError(400, "", Codes.MISSING_PARAM) (canonical_id, callback) = yield self.validate_login(user_id, authdict) - defer.returnValue(canonical_id) + return canonical_id @defer.inlineCallbacks def _check_recaptcha(self, authdict, clientip, **kwargs): @@ -433,7 +433,7 @@ class AuthHandler(BaseHandler): resp_body.get("hostname"), ) if resp_body["success"]: - defer.returnValue(True) + return True raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) def _check_email_identity(self, authdict, **kwargs): @@ -502,7 +502,7 @@ class AuthHandler(BaseHandler): threepid["threepid_creds"] = authdict["threepid_creds"] - defer.returnValue(threepid) + return threepid def _get_params_recaptcha(self): return {"public_key": self.hs.config.recaptcha_public_key} @@ -606,7 +606,7 @@ class AuthHandler(BaseHandler): yield self.store.delete_access_token(access_token) raise StoreError(400, "Login raced against device deletion") - defer.returnValue(access_token) + return access_token @defer.inlineCallbacks def check_user_exists(self, user_id): @@ -629,8 +629,8 @@ class AuthHandler(BaseHandler): self.ratelimit_login_per_account(user_id) res = yield self._find_user_id_and_pwd_hash(user_id) if res is not None: - defer.returnValue(res[0]) - defer.returnValue(None) + return res[0] + return None @defer.inlineCallbacks def _find_user_id_and_pwd_hash(self, user_id): @@ -661,7 +661,7 @@ class AuthHandler(BaseHandler): user_id, user_infos.keys(), ) - defer.returnValue(result) + return result def get_supported_login_types(self): """Get a the login types supported for the /login API @@ -722,7 +722,7 @@ class AuthHandler(BaseHandler): known_login_type = True is_valid = yield provider.check_password(qualified_user_id, password) if is_valid: - defer.returnValue((qualified_user_id, None)) + return (qualified_user_id, None) if not hasattr(provider, "get_supported_login_types") 
or not hasattr( provider, "check_auth" @@ -756,7 +756,7 @@ class AuthHandler(BaseHandler): if result: if isinstance(result, str): result = (result, None) - defer.returnValue(result) + return result if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled: known_login_type = True @@ -766,7 +766,7 @@ class AuthHandler(BaseHandler): ) if canonical_user_id: - defer.returnValue((canonical_user_id, None)) + return (canonical_user_id, None) if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) @@ -814,9 +814,9 @@ class AuthHandler(BaseHandler): if isinstance(result, str): # If it's a str, set callback function to None result = (result, None) - defer.returnValue(result) + return result - defer.returnValue((None, None)) + return (None, None) @defer.inlineCallbacks def _check_local_password(self, user_id, password): @@ -838,7 +838,7 @@ class AuthHandler(BaseHandler): """ lookupres = yield self._find_user_id_and_pwd_hash(user_id) if not lookupres: - defer.returnValue(None) + return None (user_id, password_hash) = lookupres # If the password hash is None, the account has likely been deactivated @@ -850,8 +850,8 @@ class AuthHandler(BaseHandler): result = yield self.validate_hash(password, password_hash) if not result: logger.warn("Failed password login for user %s", user_id) - defer.returnValue(None) - defer.returnValue(user_id) + return None + return user_id @defer.inlineCallbacks def validate_short_term_login_token_and_get_user_id(self, login_token): @@ -865,7 +865,7 @@ class AuthHandler(BaseHandler): raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) yield self.auth.check_auth_blocking(user_id) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def delete_access_token(self, access_token): @@ -976,7 +976,7 @@ class AuthHandler(BaseHandler): ) yield self.store.user_delete_threepid(user_id, medium, address) - defer.returnValue(result) + return result def _save_session(self, session): # TODO: Persistent storage diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index e8f9da6098..5f804d1f13 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -125,7 +125,7 @@ class DeactivateAccountHandler(BaseHandler): # Mark the user as deactivated. 
yield self.store.set_user_deactivated_status(user_id, True) - defer.returnValue(identity_server_supports_unbinding) + return identity_server_supports_unbinding def _start_user_parting(self): """ diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 99e8413092..d6ab337783 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -64,7 +64,7 @@ class DeviceWorkerHandler(BaseHandler): for device in devices: _update_device_from_client_ips(device, ips) - defer.returnValue(devices) + return devices @defer.inlineCallbacks def get_device(self, user_id, device_id): @@ -85,7 +85,7 @@ class DeviceWorkerHandler(BaseHandler): raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - defer.returnValue(device) + return device @measure_func("device.get_user_ids_changed") @defer.inlineCallbacks @@ -200,9 +200,7 @@ class DeviceWorkerHandler(BaseHandler): possibly_joined = [] possibly_left = [] - defer.returnValue( - {"changed": list(possibly_joined), "left": list(possibly_left)} - ) + return {"changed": list(possibly_joined), "left": list(possibly_left)} class DeviceHandler(DeviceWorkerHandler): @@ -250,7 +248,7 @@ class DeviceHandler(DeviceWorkerHandler): ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id # if the device id is not specified, we'll autogen one, but loop a few # times in case of a clash. @@ -264,7 +262,7 @@ class DeviceHandler(DeviceWorkerHandler): ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id attempts += 1 raise errors.StoreError(500, "Couldn't generate a device ID.") @@ -411,9 +409,7 @@ class DeviceHandler(DeviceWorkerHandler): @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) - defer.returnValue( - {"user_id": user_id, "stream_id": stream_id, "devices": devices} - ) + return {"user_id": user_id, "stream_id": stream_id, "devices": devices} @defer.inlineCallbacks def user_left_room(self, user, room_id): @@ -623,7 +619,7 @@ class DeviceListEduUpdater(object): for _, stream_id, prev_ids, _ in updates: if not prev_ids: # We always do a resync if there are no previous IDs - defer.returnValue(True) + return True for prev_id in prev_ids: if prev_id == extremity: @@ -633,8 +629,8 @@ class DeviceListEduUpdater(object): elif prev_id in stream_id_in_updates: continue else: - defer.returnValue(True) + return True stream_id_in_updates.add(stream_id) - defer.returnValue(False) + return False diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 42d5b3db30..0fd423197c 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -210,7 +210,7 @@ class DirectoryHandler(BaseHandler): except AuthError as e: logger.info("Failed to update alias events: %s", e) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def delete_appservice_association(self, service, room_alias): @@ -229,7 +229,7 @@ class DirectoryHandler(BaseHandler): room_id = yield self.store.delete_room_alias(room_alias) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def get_association(self, room_alias): @@ -277,7 +277,7 @@ class DirectoryHandler(BaseHandler): else: servers = list(servers) - defer.returnValue({"room_id": room_id, "servers": servers}) + return {"room_id": room_id, "servers": 
servers} return @defer.inlineCallbacks @@ -289,7 +289,7 @@ class DirectoryHandler(BaseHandler): result = yield self.get_association_from_room_alias(room_alias) if result is not None: - defer.returnValue({"room_id": result.room_id, "servers": result.servers}) + return {"room_id": result.room_id, "servers": result.servers} else: raise SynapseError( 404, @@ -342,7 +342,7 @@ class DirectoryHandler(BaseHandler): # Query AS to see if it exists as_handler = self.appservice_handler result = yield as_handler.query_room_alias_exists(room_alias) - defer.returnValue(result) + return result def can_modify_alias(self, alias, user_id=None): # Any application service "interested" in an alias they are regexing on @@ -369,10 +369,10 @@ class DirectoryHandler(BaseHandler): creator = yield self.store.get_room_alias_creator(alias.to_string()) if creator is not None and creator == user_id: - defer.returnValue(True) + return True is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) - defer.returnValue(is_admin) + return is_admin @defer.inlineCallbacks def edit_published_room_list(self, requester, room_id, visibility): diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index fdfe8611b6..1300b540e3 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -144,7 +144,7 @@ class E2eKeysHandler(object): ) ) - defer.returnValue({"device_keys": results, "failures": failures}) + return {"device_keys": results, "failures": failures} @defer.inlineCallbacks def query_local_devices(self, query): @@ -189,7 +189,7 @@ class E2eKeysHandler(object): r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r - defer.returnValue(result_dict) + return result_dict @defer.inlineCallbacks def on_federation_query_client_keys(self, query_body): @@ -197,7 +197,7 @@ class E2eKeysHandler(object): """ device_keys_query = query_body.get("device_keys", {}) res = yield self.query_local_devices(device_keys_query) - defer.returnValue({"device_keys": res}) + return {"device_keys": res} @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): @@ -259,7 +259,7 @@ class E2eKeysHandler(object): ), ) - defer.returnValue({"one_time_keys": json_result, "failures": failures}) + return {"one_time_keys": json_result, "failures": failures} @defer.inlineCallbacks def upload_keys_for_user(self, user_id, device_id, keys): @@ -297,7 +297,7 @@ class E2eKeysHandler(object): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue({"one_time_key_counts": result}) + return {"one_time_key_counts": result} @defer.inlineCallbacks def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index ebd807bca6..41b871fc59 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -84,7 +84,7 @@ class E2eRoomKeysHandler(object): user_id, version, room_id, session_id ) - defer.returnValue(results) + return results @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id=None, session_id=None): @@ -262,7 +262,7 @@ class E2eRoomKeysHandler(object): new_version = yield self.store.create_e2e_room_keys_version( user_id, version_info ) - defer.returnValue(new_version) + return new_version @defer.inlineCallbacks def get_version_info(self, user_id, version=None): @@ -292,7 +292,7 @@ class E2eRoomKeysHandler(object): raise NotFoundError("Unknown backup version") else: raise - defer.returnValue(res) + return res @defer.inlineCallbacks def 
delete_version(self, user_id, version=None): @@ -350,4 +350,4 @@ class E2eRoomKeysHandler(object): user_id, version, version_info ) - defer.returnValue({}) + return {} diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 6a38328af3..2f1f10a9af 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -143,7 +143,7 @@ class EventStreamHandler(BaseHandler): "end": tokens[1].to_string(), } - defer.returnValue(chunk) + return chunk class EventHandler(BaseHandler): @@ -166,7 +166,7 @@ class EventHandler(BaseHandler): event = yield self.store.get_event(event_id, check_room_id=room_id) if not event: - defer.returnValue(None) + return None return users = yield self.store.get_users_in_room(event.room_id) @@ -179,4 +179,4 @@ class EventHandler(BaseHandler): if not filtered: raise AuthError(403, "You don't have permission to access that event.") - defer.returnValue(event) + return event diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57be968c67..2aa208a2b8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -210,7 +210,7 @@ class FederationHandler(BaseHandler): event_id, origin, ) - defer.returnValue(None) + return None state = None auth_chain = [] @@ -676,7 +676,7 @@ class FederationHandler(BaseHandler): events = [e for e in events if e.event_id not in seen_events] if not events: - defer.returnValue([]) + return [] event_map = {e.event_id: e for e in events} @@ -838,7 +838,7 @@ class FederationHandler(BaseHandler): # TODO: We can probably do something more clever here. yield self._handle_new_event(dest, event, backfilled=True) - defer.returnValue(events) + return events @defer.inlineCallbacks def maybe_backfill(self, room_id, current_depth): @@ -894,7 +894,7 @@ class FederationHandler(BaseHandler): ) if not filtered_extremities: - defer.returnValue(False) + return False # Check if we reached a point where we should start backfilling. sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) @@ -965,7 +965,7 @@ class FederationHandler(BaseHandler): # If this succeeded then we probably already have the # appropriate stuff. # TODO: We can probably do something more intelligent here. - defer.returnValue(True) + return True except SynapseError as e: logger.info("Failed to backfill from %s because %s", dom, e) continue @@ -985,11 +985,11 @@ class FederationHandler(BaseHandler): logger.exception("Failed to backfill from %s because %s", dom, e) continue - defer.returnValue(False) + return False success = yield try_backfill(likely_domains) if success: - defer.returnValue(True) + return True # Huh, well *those* domains didn't work out. Lets try some domains # from the time. 
@@ -1031,11 +1031,11 @@ class FederationHandler(BaseHandler): [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: - defer.returnValue(True) + return True tried_domains.update(dom for dom, _ in likely_domains) - defer.returnValue(False) + return False def _sanity_check_event(self, ev): """ @@ -1082,7 +1082,7 @@ class FederationHandler(BaseHandler): pdu=event, ) - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def on_event_auth(self, event_id): @@ -1090,7 +1090,7 @@ class FederationHandler(BaseHandler): auth = yield self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) - defer.returnValue([e for e in auth]) + return [e for e in auth] @log_function @defer.inlineCallbacks @@ -1177,7 +1177,7 @@ class FederationHandler(BaseHandler): run_in_background(self._handle_queued_pdus, room_queue) - defer.returnValue(True) + return True @defer.inlineCallbacks def _handle_queued_pdus(self, room_queue): @@ -1247,7 +1247,7 @@ class FederationHandler(BaseHandler): room_version, event, context, do_sig_check=False ) - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1308,7 +1308,7 @@ class FederationHandler(BaseHandler): state = yield self.store.get_events(list(prev_state_ids.values())) - defer.returnValue({"state": list(state.values()), "auth_chain": auth_chain}) + return {"state": list(state.values()), "auth_chain": auth_chain} @defer.inlineCallbacks def on_invite_request(self, origin, pdu): @@ -1364,7 +1364,7 @@ class FederationHandler(BaseHandler): context = yield self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id): @@ -1389,7 +1389,7 @@ class FederationHandler(BaseHandler): context = yield self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def _make_and_verify_event( @@ -1407,7 +1407,7 @@ class FederationHandler(BaseHandler): assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - defer.returnValue((origin, event, format_ver)) + return (origin, event, format_ver) @defer.inlineCallbacks @log_function @@ -1451,7 +1451,7 @@ class FederationHandler(BaseHandler): logger.warn("Failed to create new leave %r because %s", event, e) raise e - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1484,7 +1484,7 @@ class FederationHandler(BaseHandler): event.signatures, ) - defer.returnValue(None) + return None @defer.inlineCallbacks def get_state_for_pdu(self, room_id, event_id): @@ -1512,9 +1512,9 @@ class FederationHandler(BaseHandler): del results[(event.type, event.state_key)] res = list(results.values()) - defer.returnValue(res) + return res else: - defer.returnValue([]) + return [] @defer.inlineCallbacks def get_state_ids_for_pdu(self, room_id, event_id): @@ -1539,9 +1539,9 @@ class FederationHandler(BaseHandler): else: results.pop((event.type, event.state_key), None) - defer.returnValue(list(results.values())) + return list(results.values()) else: - defer.returnValue([]) + return [] @defer.inlineCallbacks @log_function @@ -1554,7 +1554,7 @@ class FederationHandler(BaseHandler): events = yield filter_events_for_server(self.store, origin, events) - defer.returnValue(events) + return events @defer.inlineCallbacks @log_function @@ 
-1584,9 +1584,9 @@ class FederationHandler(BaseHandler): events = yield filter_events_for_server(self.store, origin, [event]) event = events[0] - defer.returnValue(event) + return event else: - defer.returnValue(None) + return None def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @@ -1618,7 +1618,7 @@ class FederationHandler(BaseHandler): self.store.remove_push_actions_from_staging, event.event_id ) - defer.returnValue(context) + return context @defer.inlineCallbacks def _handle_new_events(self, origin, event_infos, backfilled=False): @@ -1641,7 +1641,7 @@ class FederationHandler(BaseHandler): auth_events=ev_info.get("auth_events"), backfilled=backfilled, ) - defer.returnValue(res) + return res contexts = yield make_deferred_yieldable( defer.gatherResults( @@ -1800,7 +1800,7 @@ class FederationHandler(BaseHandler): if event.type == EventTypes.GuestAccess and not context.rejected: yield self.maybe_kick_guest_users(event) - defer.returnValue(context) + return context @defer.inlineCallbacks def _check_for_soft_fail(self, event, state, backfilled): @@ -1919,7 +1919,7 @@ class FederationHandler(BaseHandler): logger.debug("on_query_auth returning: %s", ret) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_get_missing_events( @@ -1942,7 +1942,7 @@ class FederationHandler(BaseHandler): self.store, origin, missing_events ) - defer.returnValue(missing_events) + return missing_events @defer.inlineCallbacks @log_function @@ -2418,16 +2418,14 @@ class FederationHandler(BaseHandler): logger.debug("construct_auth_difference returning") - defer.returnValue( - { - "auth_chain": local_auth, - "rejects": { - e.event_id: {"reason": reason_map[e.event_id], "proof": None} - for e in base_remote_rejected - }, - "missing": [e.event_id for e in missing_locals], - } - ) + return { + "auth_chain": local_auth, + "rejects": { + e.event_id: {"reason": reason_map[e.event_id], "proof": None} + for e in base_remote_rejected + }, + "missing": [e.event_id for e in missing_locals], + } @defer.inlineCallbacks @log_function @@ -2575,7 +2573,7 @@ class FederationHandler(BaseHandler): builder=builder ) EventValidator().validate_new(event) - defer.returnValue((event, context)) + return (event, context) @defer.inlineCallbacks def _check_signature(self, event, context): diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 7da63bb643..7b67c8ae0f 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -162,7 +162,7 @@ class GroupsLocalHandler(object): res.setdefault("user", {})["is_publicised"] = is_publicised - defer.returnValue(res) + return res @defer.inlineCallbacks def create_group(self, group_id, user_id, content): @@ -207,7 +207,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -217,7 +217,7 @@ class GroupsLocalHandler(object): res = yield self.groups_server_handler.get_users_in_group( group_id, requester_user_id ) - defer.returnValue(res) + return res group_server_name = get_domain_from_id(group_id) @@ -244,7 +244,7 @@ class GroupsLocalHandler(object): res["chunk"] = valid_entries - defer.returnValue(res) + return res @defer.inlineCallbacks def join_group(self, group_id, user_id, content): @@ -285,7 +285,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - 
defer.returnValue({}) + return {} @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): @@ -326,7 +326,7 @@ class GroupsLocalHandler(object): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite(self, group_id, user_id, requester_user_id, config): @@ -346,7 +346,7 @@ class GroupsLocalHandler(object): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def on_invite(self, group_id, user_id, content): @@ -377,7 +377,7 @@ class GroupsLocalHandler(object): logger.warn("No profile for user %s: %s", user_id, e) user_profile = {} - defer.returnValue({"state": "invite", "user_profile": user_profile}) + return {"state": "invite", "user_profile": user_profile} @defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): @@ -406,7 +406,7 @@ class GroupsLocalHandler(object): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def user_removed_from_group(self, group_id, user_id, content): @@ -421,7 +421,7 @@ class GroupsLocalHandler(object): @defer.inlineCallbacks def get_joined_groups(self, user_id): group_ids = yield self.store.get_joined_groups(user_id) - defer.returnValue({"groups": group_ids}) + return {"groups": group_ids} @defer.inlineCallbacks def get_publicised_groups_for_user(self, user_id): @@ -433,14 +433,14 @@ class GroupsLocalHandler(object): for app_service in self.store.get_app_services(): result.extend(app_service.get_groups_for_user(user_id)) - defer.returnValue({"groups": result}) + return {"groups": result} else: bulk_result = yield self.transport_client.bulk_get_publicised_groups( get_domain_from_id(user_id), [user_id] ) result = bulk_result.get("users", {}).get(user_id) # TODO: Verify attestations - defer.returnValue({"groups": result}) + return {"groups": result} @defer.inlineCallbacks def bulk_get_publicised_groups(self, user_ids, proxy=True): @@ -475,4 +475,4 @@ class GroupsLocalHandler(object): for app_service in self.store.get_app_services(): results[uid].extend(app_service.get_groups_for_user(uid)) - defer.returnValue({"users": results}) + return {"users": results} diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 546d6169e9..d199521b58 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -82,7 +82,7 @@ class IdentityHandler(BaseHandler): "%s is not a trusted ID server: rejecting 3pid " + "credentials", id_server, ) - defer.returnValue(None) + return None try: data = yield self.http_client.get_json( @@ -95,8 +95,8 @@ class IdentityHandler(BaseHandler): raise e.to_synapse_error() if "medium" in data: - defer.returnValue(data) - defer.returnValue(None) + return data + return None @defer.inlineCallbacks def bind_threepid(self, creds, mxid): @@ -133,7 +133,7 @@ class IdentityHandler(BaseHandler): ) except CodeMessageException as e: data = json.loads(e.msg) # XXX WAT? 
- defer.returnValue(data) + return data @defer.inlineCallbacks def try_unbind_threepid(self, mxid, threepid): @@ -161,7 +161,7 @@ class IdentityHandler(BaseHandler): # We don't know where to unbind, so we don't have a choice but to return if not id_servers: - defer.returnValue(False) + return False changed = True for id_server in id_servers: @@ -169,7 +169,7 @@ class IdentityHandler(BaseHandler): mxid, threepid, id_server ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server): @@ -224,7 +224,7 @@ class IdentityHandler(BaseHandler): id_server=id_server, ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def requestEmailToken( @@ -250,7 +250,7 @@ class IdentityHandler(BaseHandler): % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() @@ -278,7 +278,7 @@ class IdentityHandler(BaseHandler): % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 54c966c8a6..42d6650ed9 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -250,7 +250,7 @@ class InitialSyncHandler(BaseHandler): "end": now_token.to_string(), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def room_initial_sync(self, requester, room_id, pagin_config=None): @@ -301,7 +301,7 @@ class InitialSyncHandler(BaseHandler): result["account_data"] = account_data_events - defer.returnValue(result) + return result @defer.inlineCallbacks def _room_initial_sync_parted( @@ -330,28 +330,24 @@ class InitialSyncHandler(BaseHandler): time_now = self.clock.time_msec() - defer.returnValue( - { - "membership": membership, - "room_id": room_id, - "messages": { - "chunk": ( - yield self._event_serializer.serialize_events( - messages, time_now - ) - ), - "start": start_token.to_string(), - "end": end_token.to_string(), - }, - "state": ( - yield self._event_serializer.serialize_events( - room_state.values(), time_now - ) + return { + "membership": membership, + "room_id": room_id, + "messages": { + "chunk": ( + yield self._event_serializer.serialize_events(messages, time_now) ), - "presence": [], - "receipts": [], - } - ) + "start": start_token.to_string(), + "end": end_token.to_string(), + }, + "state": ( + yield self._event_serializer.serialize_events( + room_state.values(), time_now + ) + ), + "presence": [], + "receipts": [], + } @defer.inlineCallbacks def _room_initial_sync_joined( @@ -384,13 +380,13 @@ class InitialSyncHandler(BaseHandler): def get_presence(): # If presence is disabled, return an empty list if not self.hs.config.use_presence: - defer.returnValue([]) + return [] states = yield presence_handler.get_states( [m.user_id for m in room_members], as_event=True ) - defer.returnValue(states) + return states @defer.inlineCallbacks def get_receipts(): @@ -399,7 +395,7 @@ class InitialSyncHandler(BaseHandler): ) if not receipts: receipts = [] - defer.returnValue(receipts) + return receipts presence, receipts, (messages, token) = yield make_deferred_yieldable( defer.gatherResults( @@ -442,7 +438,7 @@ class InitialSyncHandler(BaseHandler): if not is_peeking: 
ret["membership"] = membership - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _check_in_room_or_world_readable(self, room_id, user_id): @@ -453,7 +449,7 @@ class InitialSyncHandler(BaseHandler): # * The user is a guest user, and has joined the room # else it will throw. member_event = yield self.auth.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) return except AuthError: visibility = yield self.state_handler.get_current_state( @@ -463,7 +459,7 @@ class InitialSyncHandler(BaseHandler): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6d7a987f13..8b27e23378 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -87,7 +87,7 @@ class MessageHandler(object): ) data = room_state[membership_event_id].get(key) - defer.returnValue(data) + return data @defer.inlineCallbacks def get_state_events( @@ -174,7 +174,7 @@ class MessageHandler(object): # events, as clients won't use them. bundle_aggregations=False, ) - defer.returnValue(events) + return events @defer.inlineCallbacks def get_joined_members(self, requester, room_id): @@ -213,15 +213,13 @@ class MessageHandler(object): # Loop fell through, AS has no interested users in room raise AuthError(403, "Appservice not in room") - defer.returnValue( - { - user_id: { - "avatar_url": profile.avatar_url, - "display_name": profile.display_name, - } - for user_id, profile in iteritems(users_with_profile) + return { + user_id: { + "avatar_url": profile.avatar_url, + "display_name": profile.display_name, } - ) + for user_id, profile in iteritems(users_with_profile) + } class EventCreationHandler(object): @@ -398,7 +396,7 @@ class EventCreationHandler(object): self.validator.validate_new(event) - defer.returnValue((event, context)) + return (event, context) def _is_exempt_from_privacy_policy(self, builder, requester): """"Determine if an event to be sent is exempt from having to consent @@ -425,9 +423,9 @@ class EventCreationHandler(object): @defer.inlineCallbacks def _is_server_notices_room(self, room_id): if self.config.server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self.config.server_notices_mxid in user_ids) + return self.config.server_notices_mxid in user_ids @defer.inlineCallbacks def assert_accepted_privacy_policy(self, requester): @@ -507,7 +505,7 @@ class EventCreationHandler(object): event.event_id, prev_state.event_id, ) - defer.returnValue(prev_state) + return prev_state yield self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit @@ -531,7 +529,7 @@ class EventCreationHandler(object): prev_content = encode_canonical_json(prev_event.content) next_content = encode_canonical_json(event.content) if prev_content == next_content: - defer.returnValue(prev_event) + return prev_event return @defer.inlineCallbacks @@ -563,7 +561,7 @@ class EventCreationHandler(object): yield self.send_nonmember_event( requester, event, context, ratelimit=ratelimit ) - defer.returnValue(event) + return event @measure_func("create_new_client_event") @defer.inlineCallbacks @@ -626,7 +624,7 @@ class 
EventCreationHandler(object): logger.debug("Created event %s", event.event_id) - defer.returnValue((event, context)) + return (event, context) @measure_func("handle_new_client_event") @defer.inlineCallbacks diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 20bcfed334..d83aab3f74 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -242,13 +242,11 @@ class PaginationHandler(object): ) if not events: - defer.returnValue( - { - "chunk": [], - "start": pagin_config.from_token.to_string(), - "end": next_token.to_string(), - } - ) + return { + "chunk": [], + "start": pagin_config.from_token.to_string(), + "end": next_token.to_string(), + } state = None if event_filter and event_filter.lazy_load_members() and len(events) > 0: @@ -286,4 +284,4 @@ class PaginationHandler(object): ) ) - defer.returnValue(chunk) + return chunk diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6f3537e435..ea54d0b991 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -461,7 +461,7 @@ class PresenceHandler(object): if affect_presence: run_in_background(_end) - defer.returnValue(_user_syncing()) + return _user_syncing() def get_currently_syncing_users(self): """Get the set of user ids that are currently syncing on this HS. @@ -556,7 +556,7 @@ class PresenceHandler(object): """Get the current presence state for a user. """ res = yield self.current_state_for_users([user_id]) - defer.returnValue(res[user_id]) + return res[user_id] @defer.inlineCallbacks def current_state_for_users(self, user_ids): @@ -585,7 +585,7 @@ class PresenceHandler(object): states.update(new) self.user_to_current_state.update(new) - defer.returnValue(states) + return states @defer.inlineCallbacks def _persist_and_notify(self, states): @@ -681,7 +681,7 @@ class PresenceHandler(object): def get_state(self, target_user, as_event=False): results = yield self.get_states([target_user.to_string()], as_event=as_event) - defer.returnValue(results[0]) + return results[0] @defer.inlineCallbacks def get_states(self, target_user_ids, as_event=False): @@ -703,17 +703,15 @@ class PresenceHandler(object): now = self.clock.time_msec() if as_event: - defer.returnValue( - [ - { - "type": "m.presence", - "content": format_user_presence_state(state, now), - } - for state in updates - ] - ) + return [ + { + "type": "m.presence", + "content": format_user_presence_state(state, now), + } + for state in updates + ] else: - defer.returnValue(updates) + return updates @defer.inlineCallbacks def set_state(self, target_user, state, ignore_status_msg=False): @@ -757,9 +755,9 @@ class PresenceHandler(object): ) if observer_room_ids & observed_room_ids: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def get_all_presence_updates(self, last_id, current_id): @@ -778,7 +776,7 @@ class PresenceHandler(object): # TODO(markjh): replicate the unpersisted changes. # This could use the in-memory stores for recent changes. rows = yield self.store.get_all_presence_updates(last_id, current_id) - defer.returnValue(rows) + return rows def notify_new_event(self): """Called when new events have happened. Handles users and servers @@ -1034,7 +1032,7 @@ class PresenceEventSource(object): # # Hence this guard where we just return nothing so that the sync # doesn't return. C.f. #5503. 
- defer.returnValue(([], max_token)) + return ([], max_token) presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache @@ -1068,17 +1066,11 @@ class PresenceEventSource(object): updates = yield presence.current_state_for_users(user_ids_changed) if include_offline: - defer.returnValue((list(updates.values()), max_token)) + return (list(updates.values()), max_token) else: - defer.returnValue( - ( - [ - s - for s in itervalues(updates) - if s.state != PresenceState.OFFLINE - ], - max_token, - ) + return ( + [s for s in itervalues(updates) if s.state != PresenceState.OFFLINE], + max_token, ) def get_current_key(self): @@ -1107,7 +1099,7 @@ class PresenceEventSource(object): ) users_interested_in.update(user_ids) - defer.returnValue(users_interested_in) + return users_interested_in def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): @@ -1287,7 +1279,7 @@ def get_interested_parties(store, states): # Always notify self users_to_states.setdefault(state.user_id, []).append(state) - defer.returnValue((room_ids_to_states, users_to_states)) + return (room_ids_to_states, users_to_states) @defer.inlineCallbacks @@ -1321,4 +1313,4 @@ def get_interested_remotes(store, states, state_handler): host = get_domain_from_id(user_id) hosts_and_states.append(([host], states)) - defer.returnValue(hosts_and_states) + return hosts_and_states diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a2388a7091..2cc237e6a5 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -73,7 +73,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: try: result = yield self.federation.make_query( @@ -82,7 +82,7 @@ class BaseProfileHandler(BaseHandler): args={"user_id": user_id}, ignore_backoff=True, ) - defer.returnValue(result) + return result except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to fetch profile"), e) except HttpResponseException as e: @@ -108,10 +108,10 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: profile = yield self.store.get_from_remote_profile_cache(user_id) - defer.returnValue(profile or {}) + return profile or {} @defer.inlineCallbacks def get_displayname(self, target_user): @@ -125,7 +125,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(displayname) + return displayname else: try: result = yield self.federation.make_query( @@ -139,7 +139,7 @@ class BaseProfileHandler(BaseHandler): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue(result["displayname"]) + return result["displayname"] @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -186,7 +186,7 @@ class BaseProfileHandler(BaseHandler): if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(avatar_url) + return avatar_url else: try: result = yield self.federation.make_query( @@ -200,7 +200,7 @@ class BaseProfileHandler(BaseHandler): except HttpResponseException as e: raise e.to_synapse_error() - 
defer.returnValue(result["avatar_url"]) + return result["avatar_url"] @defer.inlineCallbacks def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False): @@ -251,7 +251,7 @@ class BaseProfileHandler(BaseHandler): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(response) + return response @defer.inlineCallbacks def _update_join_states(self, requester, target_user): diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a85dd8cdee..218d60f0c3 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -84,7 +84,7 @@ class ReceiptsHandler(BaseHandler): if min_batch_id is None: # no new receipts - defer.returnValue(False) + return False affected_room_ids = list(set([r.room_id for r in receipts])) @@ -94,7 +94,7 @@ class ReceiptsHandler(BaseHandler): min_batch_id, max_batch_id, affected_room_ids ) - defer.returnValue(True) + return True @defer.inlineCallbacks def received_client_receipt(self, room_id, receipt_type, user_id, event_id): @@ -124,9 +124,9 @@ class ReceiptsHandler(BaseHandler): ) if not result: - defer.returnValue([]) + return [] - defer.returnValue(result) + return result class ReceiptEventSource(object): @@ -139,13 +139,13 @@ class ReceiptEventSource(object): to_key = yield self.get_current_key() if from_key == to_key: - defer.returnValue(([], to_key)) + return ([], to_key) events = yield self.store.get_linearized_receipts_for_rooms( room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() @@ -164,4 +164,4 @@ class ReceiptEventSource(object): room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index bb7cfd71b9..4631fab94e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -265,7 +265,7 @@ class RegistrationHandler(BaseHandler): # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None, False) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def _auto_join_rooms(self, user_id): @@ -360,7 +360,7 @@ class RegistrationHandler(BaseHandler): appservice_id=service_id, create_profile_with_displayname=user.localpart, ) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def check_recaptcha(self, ip, private_key, challenge, response): @@ -461,7 +461,7 @@ class RegistrationHandler(BaseHandler): id = self._next_generated_user_id self._next_generated_user_id += 1 - defer.returnValue(str(id)) + return str(id) @defer.inlineCallbacks def _validate_captcha(self, ip_addr, private_key, challenge, response): @@ -481,7 +481,7 @@ class RegistrationHandler(BaseHandler): "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" 
+ "error=%s" % lines[1], } - defer.returnValue(json) + return json @defer.inlineCallbacks def _submit_captcha(self, ip_addr, private_key, challenge, response): @@ -497,7 +497,7 @@ class RegistrationHandler(BaseHandler): "response": response, }, ) - defer.returnValue(data) + return data @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): @@ -622,7 +622,7 @@ class RegistrationHandler(BaseHandler): initial_display_name=initial_display_name, is_guest=is_guest, ) - defer.returnValue((r["device_id"], r["access_token"])) + return (r["device_id"], r["access_token"]) valid_until_ms = None if self.session_lifetime is not None: @@ -645,7 +645,7 @@ class RegistrationHandler(BaseHandler): user_id, device_id=device_id, valid_until_ms=valid_until_ms ) - defer.returnValue((device_id, access_token)) + return (device_id, access_token) @defer.inlineCallbacks def post_registration_actions( @@ -798,7 +798,7 @@ class RegistrationHandler(BaseHandler): if ex.errcode == Codes.MISSING_PARAM: # This will only happen if the ID server returns a malformed response logger.info("Can't add incomplete 3pid") - defer.returnValue(None) + return None raise yield self._auth_handler.add_threepid( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index db3f8cb76b..5caa90c3b7 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -128,7 +128,7 @@ class RoomCreationHandler(BaseHandler): old_room_id, new_version, # args for _upgrade_room ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _upgrade_room(self, requester, old_room_id, new_version): @@ -193,7 +193,7 @@ class RoomCreationHandler(BaseHandler): requester, old_room_id, new_room_id, old_room_state ) - defer.returnValue(new_room_id) + return new_room_id @defer.inlineCallbacks def _update_upgraded_room_pls( @@ -671,7 +671,7 @@ class RoomCreationHandler(BaseHandler): result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event(requester, room_id) - defer.returnValue(result) + return result @defer.inlineCallbacks def _send_events_for_new_room( @@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler): room_creator_user_id=creator_id, is_public=is_public, ) - defer.returnValue(gen_room_id) + return gen_room_id except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a room ID.") @@ -839,7 +839,7 @@ class RoomContextHandler(object): event_id, get_prev_content=True, allow_none=True ) if not event: - defer.returnValue(None) + return None return filtered = yield (filter_evts([event])) @@ -890,7 +890,7 @@ class RoomContextHandler(object): results["end"] = token.copy_and_replace("room_key", results["end"]).to_string() - defer.returnValue(results) + return results class RoomEventSource(object): @@ -941,7 +941,7 @@ class RoomEventSource(object): else: end_key = to_key - defer.returnValue((events, end_key)) + return (events, end_key) def get_current_key(self): return self.store.get_room_events_max_id() @@ -959,4 +959,4 @@ class RoomEventSource(object): limit=config.limit, ) - defer.returnValue((events, next_key)) + return (events, next_key) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index aae696a7e8..e9094ad02b 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -325,7 +325,7 @@ class RoomListHandler(BaseHandler): current_limit=since_token.current_limit - 1, ).to_token() - defer.returnValue(results) + return results @defer.inlineCallbacks def _append_room_entry_to_chunk( @@ -420,7 +420,7 @@ class 
RoomListHandler(BaseHandler): if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) if not allow_private and join_rule and join_rule != JoinRules.PUBLIC: - defer.returnValue(None) + return None # Return whether this room is open to federation users or not create_event = current_state.get((EventTypes.Create, "")) @@ -469,7 +469,7 @@ class RoomListHandler(BaseHandler): if avatar_url: result["avatar_url"] = avatar_url - defer.returnValue(result) + return result @defer.inlineCallbacks def get_remote_public_room_list( @@ -482,7 +482,7 @@ class RoomListHandler(BaseHandler): third_party_instance_id=None, ): if not self.enable_room_list_search: - defer.returnValue({"chunk": [], "total_room_count_estimate": 0}) + return {"chunk": [], "total_room_count_estimate": 0} if search_filter: # We currently don't support searching across federation, so we have @@ -507,7 +507,7 @@ class RoomListHandler(BaseHandler): ] } - defer.returnValue(res) + return res def _get_remote_list_cached( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e0196ef83e..baea08ddd0 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -191,7 +191,7 @@ class RoomMemberHandler(object): ) if duplicate is not None: # Discard the new event since this membership change is a no-op. - defer.returnValue(duplicate) + return duplicate yield self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target], ratelimit=ratelimit @@ -233,7 +233,7 @@ class RoomMemberHandler(object): if prev_member_event.membership == Membership.JOIN: yield self._user_left_room(target, room_id) - defer.returnValue(event) + return event @defer.inlineCallbacks def copy_room_tags_and_direct_to_room(self, old_room_id, new_room_id, user_id): @@ -303,7 +303,7 @@ class RoomMemberHandler(object): require_consent=require_consent, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _update_membership( @@ -423,7 +423,7 @@ class RoomMemberHandler(object): same_membership = old_membership == effective_membership_state same_sender = requester.user.to_string() == old_state.sender if same_sender and same_membership and same_content: - defer.returnValue(old_state) + return old_state if old_membership in ["ban", "leave"] and action == "kick": raise AuthError(403, "The target user is not in the room") @@ -473,7 +473,7 @@ class RoomMemberHandler(object): ret = yield self._remote_join( requester, remote_room_hosts, room_id, target, content ) - defer.returnValue(ret) + return ret elif effective_membership_state == Membership.LEAVE: if not is_host_in_room: @@ -495,7 +495,7 @@ class RoomMemberHandler(object): res = yield self._remote_reject_invite( requester, remote_room_hosts, room_id, target ) - defer.returnValue(res) + return res res = yield self._local_membership_update( requester=requester, @@ -508,7 +508,7 @@ class RoomMemberHandler(object): content=content, require_consent=require_consent, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def send_membership_event( @@ -596,11 +596,11 @@ class RoomMemberHandler(object): """ guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None) if not guest_access_id: - defer.returnValue(False) + return False guest_access = yield self.store.get_event(guest_access_id) - defer.returnValue( + return ( guest_access and guest_access.content and "guest_access" in guest_access.content @@ -635,7 +635,7 @@ class RoomMemberHandler(object): servers.remove(room_alias.domain) 
servers.insert(0, room_alias.domain) - defer.returnValue((RoomID.from_string(room_id), servers)) + return (RoomID.from_string(room_id), servers) @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -643,7 +643,7 @@ class RoomMemberHandler(object): user_id=user_id, room_id=room_id ) if invite: - defer.returnValue(UserID.from_string(invite.sender)) + return UserID.from_string(invite.sender) @defer.inlineCallbacks def do_3pid_invite( @@ -708,11 +708,11 @@ class RoomMemberHandler(object): if "signatures" not in data: raise AuthError(401, "No signatures on 3pid binding") yield self._verify_any_signature(data, id_server) - defer.returnValue(data["mxid"]) + return data["mxid"] except IOError as e: logger.warn("Error from identity server lookup: %s" % (e,)) - defer.returnValue(None) + return None @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): @@ -904,7 +904,7 @@ class RoomMemberHandler(object): if not public_keys: public_keys.append(fallback_public_key) display_name = data["display_name"] - defer.returnValue((token, public_keys, fallback_public_key, display_name)) + return (token, public_keys, fallback_public_key, display_name) @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): @@ -913,7 +913,7 @@ class RoomMemberHandler(object): create_event_id = current_state_ids.get(("m.room.create", "")) if len(current_state_ids) == 1 and create_event_id: # We can only get here if we're in the process of creating the room - defer.returnValue(True) + return True for etype, state_key in current_state_ids: if etype != EventTypes.Member or not self.hs.is_mine_id(state_key): @@ -925,16 +925,16 @@ class RoomMemberHandler(object): continue if event.membership == Membership.JOIN: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def _is_server_notice_room(self, room_id): if self._server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self._server_notices_mxid in user_ids) + return self._server_notices_mxid in user_ids class RoomMemberMasterHandler(RoomMemberHandler): @@ -978,7 +978,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): ret = yield fed_handler.do_remotely_reject_invite( remote_room_hosts, room_id, target.to_string() ) - defer.returnValue(ret) + return ret except Exception as e: # if we were unable to reject the exception, just mark # it as rejected on our end and plough ahead. 
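Every hunk in this patch applies the same mechanical rewrite: inside a @defer.inlineCallbacks generator, defer.returnValue(x) becomes a plain "return x". The two spellings behave identically under inlineCallbacks, but "return" with a value inside a generator is a SyntaxError on Python 2 (PEP 380 only allowed it from Python 3.3), which is why the returnValue helper existed in the first place; the rewrite therefore assumes Python 2 support has been dropped. A minimal sketch of the equivalence, using illustrative function names that are not taken from the patch:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        x = yield defer.succeed(41)
        defer.returnValue(x + 1)  # pre-Python-3 spelling

    @defer.inlineCallbacks
    def new_style():
        x = yield defer.succeed(41)
        return x + 1  # generator return value (PEP 380), Python 3 only

    # Both Deferreds fire synchronously here, with the same value.
    results = []
    old_style().addCallback(results.append)
    new_style().addCallback(results.append)
    assert results == [42, 42]
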
@@ -989,7 +989,7 @@ class RoomMemberMasterHandler(RoomMemberHandler): logger.warn("Failed to reject invite: %s", e) yield self.store.locally_reject_invite(target.to_string(), room_id) - defer.returnValue({}) + return {} def _user_joined_room(self, target, room_id): """Implements RoomMemberHandler._user_joined_room diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index fc873a3ba6..75e96ae1a2 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -53,7 +53,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): yield self._user_joined_room(user, room_id) - defer.returnValue(ret) + return ret def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index ddc4430d03..cd5e90bacb 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -69,7 +69,7 @@ class SearchHandler(BaseHandler): # Scan through the old room for further predecessors room_id = predecessor["room_id"] - defer.returnValue(historical_room_ids) + return historical_room_ids @defer.inlineCallbacks def search(self, user, content, batch=None): @@ -186,13 +186,11 @@ class SearchHandler(BaseHandler): room_ids.intersection_update({batch_group_key}) if not room_ids: - defer.returnValue( - { - "search_categories": { - "room_events": {"results": [], "count": 0, "highlights": []} - } + return { + "search_categories": { + "room_events": {"results": [], "count": 0, "highlights": []} } - ) + } rank_map = {} # event_id -> rank of event allowed_events = [] @@ -455,4 +453,4 @@ class SearchHandler(BaseHandler): if global_next_batch: rooms_cat_res["next_batch"] = global_next_batch - defer.returnValue({"search_categories": {"room_events": rooms_cat_res}}) + return {"search_categories": {"room_events": rooms_cat_res}} diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index 6b364befd5..f065970c40 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -48,7 +48,7 @@ class StateDeltasHandler(object): if not event and not prev_event: logger.debug("Neither event exists: %r %r", prev_event_id, event_id) - defer.returnValue(None) + return None prev_value = None value = None @@ -62,8 +62,8 @@ class StateDeltasHandler(object): logger.debug("prev_value: %r -> value: %r", prev_value, value) if value == public_value and prev_value != public_value: - defer.returnValue(True) + return True elif value != public_value and prev_value == public_value: - defer.returnValue(False) + return False else: - defer.returnValue(None) + return None diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index a0ee8db988..4449da6669 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -86,7 +86,7 @@ class StatsHandler(StateDeltasHandler): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # Loop round handling deltas until we're up to date while True: @@ -328,6 +328,6 @@ class StatsHandler(StateDeltasHandler): == "world_readable" ) ): - defer.returnValue(True) + return True else: - defer.returnValue(False) + return False diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index cd1ac0a27a..4007284e5b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -263,7 +263,7 @@ class SyncHandler(object): timeout, full_state, ) - 
defer.returnValue(res) + return res @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state): @@ -303,7 +303,7 @@ class SyncHandler(object): lazy_loaded = "false" non_empty_sync_counter.labels(sync_type, lazy_loaded).inc() - defer.returnValue(result) + return result def current_sync_for_user(self, sync_config, since_token=None, full_state=False): """Get the sync for client needed to match what the server has now. @@ -317,7 +317,7 @@ class SyncHandler(object): user_id = user.to_string() rules = yield self.store.get_push_rules_for_user(user_id) rules = format_push_rules_for_user(user, rules) - defer.returnValue(rules) + return rules @defer.inlineCallbacks def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): @@ -378,7 +378,7 @@ class SyncHandler(object): event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - defer.returnValue((now_token, ephemeral_by_room)) + return (now_token, ephemeral_by_room) @defer.inlineCallbacks def _load_filtered_recents( @@ -426,8 +426,8 @@ class SyncHandler(object): recents = [] if not limited or block_all_timeline: - defer.returnValue( - TimelineBatch(events=recents, prev_batch=now_token, limited=False) + return TimelineBatch( + events=recents, prev_batch=now_token, limited=False ) filtering_factor = 2 @@ -490,12 +490,10 @@ class SyncHandler(object): prev_batch_token = now_token.copy_and_replace("room_key", room_key) - defer.returnValue( - TimelineBatch( - events=recents, - prev_batch=prev_batch_token, - limited=limited or newly_joined_room, - ) + return TimelineBatch( + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room, ) @defer.inlineCallbacks @@ -517,7 +515,7 @@ class SyncHandler(object): if event.is_state(): state_ids = state_ids.copy() state_ids[(event.type, event.state_key)] = event.event_id - defer.returnValue(state_ids) + return state_ids @defer.inlineCallbacks def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): @@ -549,7 +547,7 @@ class SyncHandler(object): else: # no events in this room - so presumably no state state = {} - defer.returnValue(state) + return state @defer.inlineCallbacks def compute_summary(self, room_id, sync_config, batch, state, now_token): @@ -579,7 +577,7 @@ class SyncHandler(object): ) if not last_events: - defer.returnValue(None) + return None return last_event = last_events[-1] @@ -611,14 +609,14 @@ class SyncHandler(object): if name_id: name = yield self.store.get_event(name_id, allow_none=True) if name and name.content.get("name"): - defer.returnValue(summary) + return summary if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True ) if canonical_alias and canonical_alias.content.get("alias"): - defer.returnValue(summary) + return summary me = sync_config.user.to_string() @@ -652,7 +650,7 @@ class SyncHandler(object): summary["m.heroes"] = sorted([user_id for user_id in gone_user_ids])[0:5] if not sync_config.filter_collection.lazy_load_members(): - defer.returnValue(summary) + return summary # ensure we send membership events for heroes if needed cache_key = (sync_config.user.to_string(), sync_config.device_id) @@ -686,7 +684,7 @@ class SyncHandler(object): cache.set(s.state_key, s.event_id) state[(EventTypes.Member, s.state_key)] = s - defer.returnValue(summary) + return summary def get_lazy_loaded_members_cache(self, cache_key): cache = 
self.lazy_loaded_members_cache.get(cache_key) @@ -871,14 +869,12 @@ class SyncHandler(object): if state_ids: state = yield self.store.get_events(list(state_ids.values())) - defer.returnValue( - { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - list(state.values()) - ) - } - ) + return { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + list(state.values()) + ) + } @defer.inlineCallbacks def unread_notifs_for_room_id(self, room_id, sync_config): @@ -894,11 +890,11 @@ class SyncHandler(object): notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) - defer.returnValue(notifs) + return notifs # There is no new information in this period, so your notification # count is whatever it was last time. - defer.returnValue(None) + return None @defer.inlineCallbacks def generate_sync_result(self, sync_config, since_token=None, full_state=False): @@ -989,19 +985,17 @@ class SyncHandler(object): "Sync result for newly joined room %s: %r", room_id, joined_room ) - defer.returnValue( - SyncResult( - presence=sync_result_builder.presence, - account_data=sync_result_builder.account_data, - joined=sync_result_builder.joined, - invited=sync_result_builder.invited, - archived=sync_result_builder.archived, - to_device=sync_result_builder.to_device, - device_lists=device_lists, - groups=sync_result_builder.groups, - device_one_time_keys_count=one_time_key_counts, - next_batch=sync_result_builder.now_token, - ) + return SyncResult( + presence=sync_result_builder.presence, + account_data=sync_result_builder.account_data, + joined=sync_result_builder.joined, + invited=sync_result_builder.invited, + archived=sync_result_builder.archived, + to_device=sync_result_builder.to_device, + device_lists=device_lists, + groups=sync_result_builder.groups, + device_one_time_keys_count=one_time_key_counts, + next_batch=sync_result_builder.now_token, ) @measure_func("_generate_sync_entry_for_groups") @@ -1124,11 +1118,9 @@ class SyncHandler(object): # Remove any users that we still share a room with. 
newly_left_users -= users_who_share_room - defer.returnValue( - DeviceLists(changed=users_that_have_changed, left=newly_left_users) - ) + return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: - defer.returnValue(DeviceLists(changed=[], left=[])) + return DeviceLists(changed=[], left=[]) @defer.inlineCallbacks def _generate_sync_entry_for_to_device(self, sync_result_builder): @@ -1225,7 +1217,7 @@ class SyncHandler(object): sync_result_builder.account_data = account_data_for_user - defer.returnValue(account_data_by_room) + return account_data_by_room @defer.inlineCallbacks def _generate_sync_entry_for_presence( @@ -1325,7 +1317,7 @@ class SyncHandler(object): ) if not tags_by_room: logger.debug("no-oping sync") - defer.returnValue(([], [], [], [])) + return ([], [], [], []) ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1388,13 +1380,11 @@ class SyncHandler(object): newly_left_users -= newly_joined_or_invited_users - defer.returnValue( - ( - newly_joined_rooms, - newly_joined_or_invited_users, - newly_left_rooms, - newly_left_users, - ) + return ( + newly_joined_rooms, + newly_joined_or_invited_users, + newly_left_rooms, + newly_left_users, ) @defer.inlineCallbacks @@ -1414,13 +1404,13 @@ class SyncHandler(object): ) if rooms_changed: - defer.returnValue(True) + return True stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream for room_id in sync_result_builder.joined_room_ids: if self.store.has_room_changed_since(room_id, stream_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def _get_rooms_changed(self, sync_result_builder, ignored_users): @@ -1637,7 +1627,7 @@ class SyncHandler(object): ) room_entries.append(entry) - defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms)) + return (room_entries, invited, newly_joined_rooms, newly_left_rooms) @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): @@ -1711,7 +1701,7 @@ class SyncHandler(object): ) ) - defer.returnValue((room_entries, invited, [])) + return (room_entries, invited, []) @defer.inlineCallbacks def _generate_room_entry( @@ -1912,7 +1902,7 @@ class SyncHandler(object): joined_room_ids.add(room_id) joined_room_ids = frozenset(joined_room_ids) - defer.returnValue(joined_room_ids) + return joined_room_ids def _action_has_highlight(actions): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c3e0c8fc7e..6b661aa93d 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -140,7 +140,7 @@ class TypingHandler(object): if was_present: # No point sending another notification - defer.returnValue(None) + return None self._push_update(member=member, typing=True) @@ -173,7 +173,7 @@ class TypingHandler(object): def _stopped_typing(self, member): if member.user_id not in self._room_typing.get(member.room_id, set()): # No point - defer.returnValue(None) + return None self._member_typing_until.pop(member, None) self._member_last_federation_poke.pop(member, None) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 5de9630950..e53669e40d 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -133,7 +133,7 @@ class UserDirectoryHandler(StateDeltasHandler): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # 
Loop round handling deltas until we're up to date while True: diff --git a/synapse/http/client.py b/synapse/http/client.py index 45d5010952..0ac20ebefc 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -294,7 +294,7 @@ class SimpleHttpClient(object): logger.info( "Received response to %s %s: %s", method, redact_uri(uri), response.code ) - defer.returnValue(response) + return response except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( @@ -345,7 +345,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -385,7 +385,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -410,7 +410,7 @@ class SimpleHttpClient(object): ValueError: if the response was not JSON """ body = yield self.get_raw(uri, args, headers=headers) - defer.returnValue(json.loads(body)) + return json.loads(body) @defer.inlineCallbacks def put_json(self, uri, json_body, args={}, headers=None): @@ -453,7 +453,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -488,7 +488,7 @@ class SimpleHttpClient(object): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(body) + return body else: raise HttpResponseException(response.code, response.phrase, body) @@ -545,13 +545,11 @@ class SimpleHttpClient(object): except Exception as e: raise_from(SynapseError(502, ("Failed to download remote body: %s" % e)), e) - defer.returnValue( - ( - length, - resp_headers, - response.request.absoluteURI.decode("ascii"), - response.code, - ) + return ( + length, + resp_headers, + response.request.absoluteURI.decode("ascii"), + response.code, ) @@ -627,10 +625,10 @@ class CaptchaServerHttpClient(SimpleHttpClient): try: body = yield make_deferred_yieldable(readBody(response)) - defer.returnValue(body) + return body except PartialDownloadError as e: # twisted dislikes google's response, no content length. 
- defer.returnValue(e.response) + return e.response def encode_urlencode_args(args): diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 054c321a20..c03ddb724f 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -177,7 +177,7 @@ class MatrixFederationAgent(object): res = yield make_deferred_yieldable( agent.request(method, uri, headers, bodyProducer) ) - defer.returnValue(res) + return res @defer.inlineCallbacks def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): @@ -205,24 +205,20 @@ class MatrixFederationAgent(object): port = parsed_uri.port if port == -1: port = 8448 - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=port, ) if parsed_uri.port != -1: # there is an explicit port - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=parsed_uri.port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=parsed_uri.port, ) if lookup_well_known: @@ -259,7 +255,7 @@ class MatrixFederationAgent(object): ) res = yield self._route_matrix_uri(new_uri, lookup_well_known=False) - defer.returnValue(res) + return res # try a SRV lookup service_name = b"_matrix._tcp.%s" % (parsed_uri.host,) @@ -283,13 +279,11 @@ class MatrixFederationAgent(object): parsed_uri.host.decode("ascii"), ) - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=target_host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=target_host, + target_port=port, ) @defer.inlineCallbacks @@ -314,7 +308,7 @@ class MatrixFederationAgent(object): if cache_period > 0: self._well_known_cache.set(server_name, result, cache_period) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_get_well_known(self, server_name): @@ -354,7 +348,7 @@ class MatrixFederationAgent(object): # after startup cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - defer.returnValue((None, cache_period)) + return (None, cache_period) result = parsed_body["m.server"].encode("ascii") @@ -369,7 +363,7 @@ class MatrixFederationAgent(object): else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) - defer.returnValue((result, cache_period)) + return (result, cache_period) @implementer(IStreamClientEndpoint) diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index ecc88f9b96..b32188766d 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -120,7 +120,7 @@ class SrvResolver(object): if cache_entry: if all(s.expires > now for s in cache_entry): servers = list(cache_entry) - defer.returnValue(servers) + return servers try: answers, _, _ = yield make_deferred_yieldable( @@ -129,7 +129,7 @@ class SrvResolver(object): except DNSNameError: # TODO: cache this. We can get the SOA out of the exception, and use # the negative-TTL value. 
- defer.returnValue([]) + return [] except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise @@ -138,7 +138,7 @@ class SrvResolver(object): logger.warn( "Failed to resolve %r, falling back to cache. %r", service_name, e ) - defer.returnValue(list(cache_entry)) + return list(cache_entry) else: raise e @@ -169,4 +169,4 @@ class SrvResolver(object): ) self._cache[service_name] = list(servers) - defer.returnValue(servers) + return servers diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index e60334547e..d07d356464 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -158,7 +158,7 @@ def _handle_json_response(reactor, timeout_sec, request, response): response.code, response.phrase.decode("ascii", errors="replace"), ) - defer.returnValue(body) + return body class MatrixFederationHttpClient(object): @@ -256,7 +256,7 @@ class MatrixFederationHttpClient(object): response = yield self._send_request(request, **send_request_args) - defer.returnValue(response) + return response @defer.inlineCallbacks def _send_request( @@ -520,7 +520,7 @@ class MatrixFederationHttpClient(object): _flatten_response_never_received(e), ) raise - defer.returnValue(response) + return response def build_auth_headers( self, destination, method, url_bytes, content=None, destination_is=None @@ -644,7 +644,7 @@ class MatrixFederationHttpClient(object): self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def post_json( @@ -713,7 +713,7 @@ class MatrixFederationHttpClient(object): body = yield _handle_json_response( self.reactor, _sec_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_json( @@ -778,7 +778,7 @@ class MatrixFederationHttpClient(object): self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def delete_json( @@ -836,7 +836,7 @@ class MatrixFederationHttpClient(object): body = yield _handle_json_response( self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_file( @@ -902,7 +902,7 @@ class MatrixFederationHttpClient(object): response.phrase.decode("ascii", errors="replace"), length, ) - defer.returnValue((length, headers)) + return (length, headers) class _ReadBodyToFileProtocol(protocol.Protocol): diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 96a4714d82..fb338ca223 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -89,7 +89,7 @@ the function becomes the operation name for the span. 
# We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Operation names can be explicitly set for functions by using ``trace_using_operation_name`` and @@ -113,7 +113,7 @@ Operation names can be explicitly set for functions by using # We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Contexts and carriers --------------------- @@ -694,7 +694,7 @@ def trace_servlet(servlet_name, func): }, ): result = yield defer.maybeDeferred(func, request, *args, **kwargs) - defer.returnValue(result) + return result return _trace_servlet_inner diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7bb020cb45..41147d4292 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -101,7 +101,7 @@ class ModuleApi(object): ) user_id = yield self.register_user(localpart, displayname, emails) _, access_token = yield self.register_device(user_id) - defer.returnValue((user_id, access_token)) + return (user_id, access_token) def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. diff --git a/synapse/notifier.py b/synapse/notifier.py index 918ef64897..bd80c801b6 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -365,7 +365,7 @@ class Notifier(object): current_token = user_stream.current_token result = yield callback(prev_token, current_token) - defer.returnValue(result) + return result @defer.inlineCallbacks def get_events_for( @@ -400,7 +400,7 @@ class Notifier(object): @defer.inlineCallbacks def check_for_updates(before_token, after_token): if not after_token.is_after(before_token): - defer.returnValue(EventStreamResult([], (from_token, from_token))) + return EventStreamResult([], (from_token, from_token)) events = [] end_token = from_token @@ -440,7 +440,7 @@ class Notifier(object): events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) - defer.returnValue(EventStreamResult(events, (from_token, end_token))) + return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: @@ -465,18 +465,18 @@ class Notifier(object): from_token=from_token, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _get_room_ids(self, user, explicit_room_id): joined_room_ids = yield self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: - defer.returnValue(([explicit_room_id], True)) + return ([explicit_room_id], True) if (yield self._is_world_readable(explicit_room_id)): - defer.returnValue(([explicit_room_id], False)) + return ([explicit_room_id], False) raise AuthError(403, "Non-joined access not allowed") - defer.returnValue((joined_room_ids, True)) + return (joined_room_ids, True) @defer.inlineCallbacks def _is_world_readable(self, room_id): @@ -484,9 +484,9 @@ class Notifier(object): room_id, EventTypes.RoomHistoryVisibility, "" ) if state and "history_visibility" in state.content: - defer.returnValue(state.content["history_visibility"] == "world_readable") + return state.content["history_visibility"] == "world_readable" else: - defer.returnValue(False) + return False @log_function def remove_expired_streams(self): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c8a5b381da..c831975635 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ 
b/synapse/push/bulk_push_rule_evaluator.py @@ -95,7 +95,7 @@ class BulkPushRuleEvaluator(object): invited ) - defer.returnValue(rules_by_user) + return rules_by_user @cached() def _get_rules_for_room(self, room_id): @@ -134,7 +134,7 @@ class BulkPushRuleEvaluator(object): pl_event = auth_events.get(POWER_KEY) - defer.returnValue((pl_event.content if pl_event else {}, sender_level)) + return (pl_event.content if pl_event else {}, sender_level) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): @@ -283,13 +283,13 @@ class RulesForRoom(object): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user with (yield self.linearizer.queue(())): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user self.room_push_rule_cache_metrics.inc_misses() @@ -366,7 +366,7 @@ class RulesForRoom(object): logger.debug( "Returning push rules for %r %r", self.room_id, ret_rules_by_user.keys() ) - defer.returnValue(ret_rules_by_user) + return ret_rules_by_user @defer.inlineCallbacks def _update_rules_with_member_event_ids( diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 4e7b6a5531..5b15b0dbe7 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -258,17 +258,17 @@ class HttpPusher(object): @defer.inlineCallbacks def _process_one(self, push_action): if "notify" not in push_action["actions"]: - defer.returnValue(True) + return True tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) event = yield self.store.get_event(push_action["event_id"], allow_none=True) if event is None: - defer.returnValue(True) # It's been redacted + return True # It's been redacted rejected = yield self.dispatch_push(event, tweaks, badge) if rejected is False: - defer.returnValue(False) + return False if isinstance(rejected, list) or isinstance(rejected, tuple): for pk in rejected: @@ -282,7 +282,7 @@ class HttpPusher(object): else: logger.info("Pushkey %s was rejected: removing", pk) yield self.hs.remove_pusher(self.app_id, pk, self.user_id) - defer.returnValue(True) + return True @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): @@ -302,7 +302,7 @@ class HttpPusher(object): ], } } - defer.returnValue(d) + return d ctx = yield push_tools.get_context_for_event( self.store, self.state_handler, event, self.user_id @@ -345,13 +345,13 @@ class HttpPusher(object): if "name" in ctx and len(ctx["name"]) > 0: d["notification"]["room_name"] = ctx["name"] - defer.returnValue(d) + return d @defer.inlineCallbacks def dispatch_push(self, event, tweaks, badge): notification_dict = yield self._build_notification_dict(event, tweaks, badge) if not notification_dict: - defer.returnValue([]) + return [] try: resp = yield self.http_client.post_json_get_json( self.url, notification_dict @@ -364,11 +364,11 @@ class HttpPusher(object): type(e), e, ) - defer.returnValue(False) + return False rejected = [] if "rejected" in resp: rejected = resp["rejected"] - defer.returnValue(rejected) + return rejected @defer.inlineCallbacks def _send_badge(self, badge): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 
521c6e2cd7..4245ce26f3 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -316,7 +316,7 @@ class Mailer(object): if not merge: room_vars["notifs"].append(notifvars) - defer.returnValue(room_vars) + return room_vars @defer.inlineCallbacks def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): @@ -343,7 +343,7 @@ class Mailer(object): if messagevars is not None: ret["messages"].append(messagevars) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_message_vars(self, notif, event, room_state_ids): @@ -379,7 +379,7 @@ class Mailer(object): if "body" in event.content: ret["body_text_plain"] = event.content["body"] - defer.returnValue(ret) + return ret def add_text_message_vars(self, messagevars, event): msgformat = event.content.get("format") @@ -428,19 +428,16 @@ class Mailer(object): inviter_name = name_from_member_event(inviter_member_event) if room_name is None: - defer.returnValue( - INVITE_FROM_PERSON - % {"person": inviter_name, "app": self.app_name} - ) + return INVITE_FROM_PERSON % { + "person": inviter_name, + "app": self.app_name, + } else: - defer.returnValue( - INVITE_FROM_PERSON_TO_ROOM - % { - "person": inviter_name, - "room": room_name, - "app": self.app_name, - } - ) + return INVITE_FROM_PERSON_TO_ROOM % { + "person": inviter_name, + "room": room_name, + "app": self.app_name, + } sender_name = None if len(notifs_by_room[room_id]) == 1: @@ -454,26 +451,21 @@ class Mailer(object): sender_name = name_from_member_event(state_event) if sender_name is not None and room_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON_IN_ROOM - % { - "person": sender_name, - "room": room_name, - "app": self.app_name, - } - ) + return MESSAGE_FROM_PERSON_IN_ROOM % { + "person": sender_name, + "room": room_name, + "app": self.app_name, + } elif sender_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON - % {"person": sender_name, "app": self.app_name} - ) + return MESSAGE_FROM_PERSON % { + "person": sender_name, + "app": self.app_name, + } else: # There's more than one notification for this room, so just # say there are several if room_name is not None: - defer.returnValue( - MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} - ) + return MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} else: # If the room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -493,24 +485,19 @@ class Mailer(object): ] ) - defer.returnValue( - MESSAGES_FROM_PERSON - % { - "person": descriptor_from_member_events( - member_events.values() - ), - "app": self.app_name, - } - ) + return MESSAGES_FROM_PERSON % { + "person": descriptor_from_member_events(member_events.values()), + "app": self.app_name, + } else: # Stuff's happened in multiple different rooms # ...but we still refer to the 'reason' room which triggered the mail if reason["room_name"] is not None: - defer.returnValue( - MESSAGES_IN_ROOM_AND_OTHERS - % {"room": reason["room_name"], "app": self.app_name} - ) + return MESSAGES_IN_ROOM_AND_OTHERS % { + "room": reason["room_name"], + "app": self.app_name, + } else: # If the reason room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -527,13 +514,10 @@ class Mailer(object): [room_state_ids[room_id][("m.room.member", s)] for s in sender_ids] ) - defer.returnValue( - MESSAGES_FROM_PERSON_AND_OTHERS - % { - "person": descriptor_from_member_events(member_events.values()), - "app": self.app_name, - } - ) + return 
MESSAGES_FROM_PERSON_AND_OTHERS % { + "person": descriptor_from_member_events(member_events.values()), + "app": self.app_name, + } def make_room_link(self, room_id): if self.hs.config.email_riot_base_url: diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 06056fbf4f..16a7e8e31d 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -55,7 +55,7 @@ def calculate_room_name( room_state_ids[("m.room.name", "")], allow_none=True ) if m_room_name and m_room_name.content and m_room_name.content["name"]: - defer.returnValue(m_room_name.content["name"]) + return m_room_name.content["name"] # does it have a canonical alias? if ("m.room.canonical_alias", "") in room_state_ids: @@ -68,7 +68,7 @@ def calculate_room_name( and canon_alias.content["alias"] and _looks_like_an_alias(canon_alias.content["alias"]) ): - defer.returnValue(canon_alias.content["alias"]) + return canon_alias.content["alias"] # at this point we're going to need to search the state by all state keys # for an event type, so rearrange the data structure @@ -82,10 +82,10 @@ def calculate_room_name( if alias_event and alias_event.content.get("aliases"): the_aliases = alias_event.content["aliases"] if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]): - defer.returnValue(the_aliases[0]) + return the_aliases[0] if not fallback_to_members: - defer.returnValue(None) + return None my_member_event = None if ("m.room.member", user_id) in room_state_ids: @@ -104,14 +104,13 @@ def calculate_room_name( ) if inviter_member_event: if fallback_to_single_member: - defer.returnValue( - "Invite from %s" - % (name_from_member_event(inviter_member_event),) + return "Invite from %s" % ( + name_from_member_event(inviter_member_event), ) else: return else: - defer.returnValue("Room Invite") + return "Room Invite" # we're going to have to generate a name based on who's in the room, # so find out who is in the room that isn't the user. 
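(Note on the mechanics behind this whole series of hunks, with a minimal standalone sketch that is not taken from the patch: on Python 3, "return x" inside a generator raises StopIteration(x), and defer.inlineCallbacks unwraps that value as the result of the Deferred, exactly as defer.returnValue(x) did. fetch_name below is a hypothetical stand-in for any asynchronous call.)

    from twisted.internet import defer

    def fetch_name():
        # hypothetical async helper; succeed() stands in for real I/O
        return defer.succeed("Bob")

    @defer.inlineCallbacks
    def get_name():
        name = yield fetch_name()
        # Equivalent to defer.returnValue(name): the generator raises
        # StopIteration(name) and inlineCallbacks fires the Deferred
        # with that value.
        return name

    get_name().addCallback(print)  # prints "Bob"

Callers are unaffected either way: they receive the same Deferred with the same result.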
@@ -154,17 +153,17 @@ def calculate_room_name( # return "Inviting %s" % ( # descriptor_from_member_events(third_party_invites) # ) - defer.returnValue("Inviting email address") + return "Inviting email address" else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE else: - defer.returnValue(name_from_member_event(all_members[0])) + return name_from_member_event(all_members[0]) else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE elif len(other_members) == 1 and not fallback_to_single_member: return else: - defer.returnValue(descriptor_from_member_events(other_members)) + return descriptor_from_member_events(other_members) def descriptor_from_member_events(member_events): diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index e37269cdb9..a54051a726 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -39,7 +39,7 @@ def get_badge_count(store, user_id): # return one badge count per conversation, as count per # message is so noisy as to be almost useless badge += 1 if notifs["notify_count"] else 0 - defer.returnValue(badge) + return badge @defer.inlineCallbacks @@ -61,4 +61,4 @@ def get_context_for_event(store, state_handler, ev, user_id): sender_state_event = yield store.get_event(sender_state_event_id) ctx["sender_display_name"] = name_from_member_event(sender_state_event) - defer.returnValue(ctx) + return ctx diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index df6f670740..08e840fdc2 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -123,7 +123,7 @@ class PusherPool: ) pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id) - defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user( @@ -224,7 +224,7 @@ class PusherPool: if pusher_dict: pusher = yield self._start_pusher(pusher_dict) - defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def _start_pushers(self): @@ -293,7 +293,7 @@ class PusherPool: p.on_started(have_notifs) - defer.returnValue(p) + return p @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279f..f5074b101a 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -185,7 +185,7 @@ class ReplicationEndpoint(object): except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to talk to master"), e) - defer.returnValue(result) + return result return send_request diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 61eafbe708..fed4f08820 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -80,7 +80,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): payload = {"events": event_payloads, "backfilled": backfilled} - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request): @@ -113,7 +113,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): event_and_contexts, backfilled ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): @@ -156,7 +156,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): result = yield self.registry.on_edu(edu_type, origin, edu_content) - defer.returnValue((200, result)) + return (200, result) class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -204,7 +204,7 
@@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): result = yield self.registry.on_query(query_type, args) - defer.returnValue((200, result)) + return (200, result) class ReplicationCleanRoomRestServlet(ReplicationEndpoint): @@ -238,7 +238,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): def _handle_request(self, request, room_id): yield self.store.clean_room_for_join(room_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 7c1197e5dd..f17d3a2da4 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -64,7 +64,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): user_id, device_id, initial_display_name, is_guest ) - defer.returnValue((200, {"device_id": device_id, "access_token": access_token})) + return (200, {"device_id": device_id, "access_token": access_token}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 2d9cbbaefc..4217335d88 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -83,7 +83,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): remote_room_hosts, room_id, user_id, event_content ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @@ -153,7 +153,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): yield self.store.locally_reject_invite(user_id, room_id) ret = {} - defer.returnValue((200, ret)) + return (200, ret) class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 2bf2173895..3341320a87 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -90,7 +90,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): address=content["address"], ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @@ -143,7 +143,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): bind_msisdn=bind_msisdn, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 034763fe99..eff7bd7305 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -85,7 +85,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): "extra_users": [u.to_string() for u in extra_users], } - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request, event_id): @@ -117,7 +117,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 7ef67a5a73..c10b85d2ff 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -158,7 +158,7 @@ class Stream(object): updates, current_token = yield self.get_updates_since(self.last_token) self.last_token = current_token - defer.returnValue((updates, current_token)) + return (updates, 
current_token) @defer.inlineCallbacks def get_updates_since(self, from_token): @@ -172,14 +172,14 @@ class Stream(object): sent over the replication steam. """ if from_token in ("NOW", "now"): - defer.returnValue(([], self.upto_token)) + return ([], self.upto_token) current_token = self.upto_token from_token = int(from_token) if from_token == current_token: - defer.returnValue(([], current_token)) + return ([], current_token) if self._LIMITED: rows = yield self.update_function( @@ -198,7 +198,7 @@ class Stream(object): if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) - defer.returnValue((updates, current_token)) + return (updates, current_token) def current_token(self): """Gets the current token of the underlying streams. Should be provided @@ -297,7 +297,7 @@ class PushRulesStream(Stream): @defer.inlineCallbacks def update_function(self, from_token, to_token, limit): rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit) - defer.returnValue([(row[0], row[2]) for row in rows]) + return [(row[0], row[2]) for row in rows] class PushersStream(Stream): @@ -424,7 +424,7 @@ class AccountDataStream(Stream): for stream_id, user_id, account_data_type, content in global_results ) - defer.returnValue(results) + return results class GroupServerStream(Stream): diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 3d0694bb11..d97669c886 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -134,7 +134,7 @@ class EventsStream(Stream): all_updates = heapq.merge(event_updates, state_updates) - defer.returnValue(all_updates) + return all_updates @classmethod def parse_row(cls, row): diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 6888ae5590..0a7d9b81b2 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -84,7 +84,7 @@ class UsersRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_users() - defer.returnValue((200, ret)) + return (200, ret) class VersionServlet(RestServlet): @@ -227,7 +227,7 @@ class UserRegisterServlet(RestServlet): ) result = yield register._create_registration_details(user_id, body) - defer.returnValue((200, result)) + return (200, result) class WhoisRestServlet(RestServlet): @@ -252,7 +252,7 @@ class WhoisRestServlet(RestServlet): ret = yield self.handlers.admin_handler.get_whois(target_user) - defer.returnValue((200, ret)) + return (200, ret) class PurgeMediaCacheRestServlet(RestServlet): @@ -271,7 +271,7 @@ class PurgeMediaCacheRestServlet(RestServlet): ret = yield self.media_repository.delete_old_remote_media(before_ts) - defer.returnValue((200, ret)) + return (200, ret) class PurgeHistoryRestServlet(RestServlet): @@ -356,7 +356,7 @@ class PurgeHistoryRestServlet(RestServlet): room_id, token, delete_local_events=delete_local_events ) - defer.returnValue((200, {"purge_id": purge_id})) + return (200, {"purge_id": purge_id}) class PurgeHistoryStatusRestServlet(RestServlet): @@ -381,7 +381,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): if purge_status is None: raise NotFoundError("purge id '%s' not found" % purge_id) - defer.returnValue((200, purge_status.asdict())) + return (200, purge_status.asdict()) class DeactivateAccountRestServlet(RestServlet): @@ -413,7 +413,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - defer.returnValue((200, 
{"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class ShutdownRoomRestServlet(RestServlet): @@ -531,16 +531,14 @@ class ShutdownRoomRestServlet(RestServlet): room_id, new_room_id, requester_user_id ) - defer.returnValue( - ( - 200, - { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, - "new_room_id": new_room_id, - }, - ) + return ( + 200, + { + "kicked_users": kicked_users, + "failed_to_kick_users": failed_to_kick_users, + "local_aliases": aliases_for_room, + "new_room_id": new_room_id, + }, ) @@ -564,7 +562,7 @@ class QuarantineMediaInRoom(RestServlet): room_id, requester.user.to_string() ) - defer.returnValue((200, {"num_quarantined": num_quarantined})) + return (200, {"num_quarantined": num_quarantined}) class ListMediaInRoom(RestServlet): @@ -585,7 +583,7 @@ class ListMediaInRoom(RestServlet): local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs})) + return (200, {"local": local_mxcs, "remote": remote_mxcs}) class ResetPasswordRestServlet(RestServlet): @@ -629,7 +627,7 @@ class ResetPasswordRestServlet(RestServlet): yield self._set_password_handler.set_password( target_user_id, new_password, requester ) - defer.returnValue((200, {})) + return (200, {}) class GetUsersPaginatedRestServlet(RestServlet): @@ -671,7 +669,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -699,7 +697,7 @@ class GetUsersPaginatedRestServlet(RestServlet): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) class SearchUsersRestServlet(RestServlet): @@ -742,7 +740,7 @@ class SearchUsersRestServlet(RestServlet): logger.info("term: %s ", term) ret = yield self.handlers.admin_handler.search_users(term) - defer.returnValue((200, ret)) + return (200, ret) class DeleteGroupAdminRestServlet(RestServlet): @@ -765,7 +763,7 @@ class DeleteGroupAdminRestServlet(RestServlet): raise SynapseError(400, "Can only delete local groups") yield self.group_server.delete_group(group_id, requester.user.to_string()) - defer.returnValue((200, {})) + return (200, {}) class AccountValidityRenewServlet(RestServlet): @@ -796,7 +794,7 @@ class AccountValidityRenewServlet(RestServlet): ) res = {"expiration_ts": expiration_ts} - defer.returnValue((200, res)) + return (200, res) ######################################################################################## diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d..90c0ee15dc 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -87,7 +87,7 @@ class SendServerNoticeServlet(RestServlet): event_content=body["content"], ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, txn_id): return self.txns.fetch_or_execute_request( diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 57542c2b4b..4284738021 100644 --- a/synapse/rest/client/v1/directory.py 
+++ b/synapse/rest/client/v1/directory.py @@ -54,7 +54,7 @@ class ClientDirectoryServer(RestServlet): dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_PUT(self, request, room_alias): @@ -87,7 +87,7 @@ class ClientDirectoryServer(RestServlet): requester, room_alias, room_id, servers ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_alias): @@ -102,7 +102,7 @@ class ClientDirectoryServer(RestServlet): service.url, room_alias.to_string(), ) - defer.returnValue((200, {})) + return (200, {}) except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass @@ -118,7 +118,7 @@ class ClientDirectoryServer(RestServlet): "User %s deleted alias %s", user.to_string(), room_alias.to_string() ) - defer.returnValue((200, {})) + return (200, {}) class ClientDirectoryListServer(RestServlet): @@ -136,9 +136,7 @@ class ClientDirectoryListServer(RestServlet): if room is None: raise NotFoundError("Unknown room") - defer.returnValue( - (200, {"visibility": "public" if room["is_public"] else "private"}) - ) + return (200, {"visibility": "public" if room["is_public"] else "private"}) @defer.inlineCallbacks def on_PUT(self, request, room_id): @@ -151,7 +149,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_id): @@ -161,7 +159,7 @@ class ClientDirectoryListServer(RestServlet): requester, room_id, "private" ) - defer.returnValue((200, {})) + return (200, {}) class ClientAppserviceDirectoryListServer(RestServlet): @@ -195,4 +193,4 @@ class ClientAppserviceDirectoryListServer(RestServlet): requester.app_service.id, network_id, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index d6de2b7360..53ebed2203 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,7 +67,7 @@ class EventStreamRestServlet(RestServlet): is_guest=is_guest, ) - defer.returnValue((200, chunk)) + return (200, chunk) def on_OPTIONS(self, request): return (200, {}) @@ -91,9 +91,9 @@ class EventRestServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 0fe5f2d79b..70b8478e90 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -42,7 +42,7 @@ class InitialSyncRestServlet(RestServlet): include_archived=include_archived, ) - defer.returnValue((200, content)) + return (200, content) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 0d05945f0a..5762b9fd06 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -152,7 +152,7 @@ class LoginRestServlet(RestServlet): well_known_data = self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data - defer.returnValue((200, result)) + return (200, result) 
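(A second note, again as an illustrative sketch with hypothetical names rather than code from the patch: a bare "return" with no value, as in several of the early-exit paths above, fires the Deferred with None, which is the same behaviour as defer.returnValue(None) or simply falling off the end of the generator.)

    from twisted.internet import defer

    @defer.inlineCallbacks
    def lookup(skip):
        if skip:
            # Early exit: fires the Deferred with None, just as
            # defer.returnValue(None) used to.
            return
        value = yield defer.succeed(42)
        return value

Either exit path leaves callers unchanged: they get a Deferred firing with None or 42 respectively.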
@defer.inlineCallbacks def _do_other_login(self, login_submission): @@ -212,7 +212,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback_3pid ) - defer.returnValue(result) + return result # No password providers were able to handle this 3pid # Check local store @@ -241,7 +241,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _register_device_with_callback(self, user_id, login_submission, callback=None): @@ -273,7 +273,7 @@ class LoginRestServlet(RestServlet): if callback is not None: yield callback(result) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_token_login(self, login_submission): @@ -284,7 +284,7 @@ class LoginRestServlet(RestServlet): ) result = yield self._register_device_with_callback(user_id, login_submission) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_jwt_login(self, login_submission): @@ -321,7 +321,7 @@ class LoginRestServlet(RestServlet): result = yield self._register_device_with_callback( registered_user_id, login_submission ) - defer.returnValue(result) + return result class BaseSSORedirectServlet(RestServlet): @@ -395,7 +395,7 @@ class CasTicketServlet(RestServlet): # even if that's being used old-http style to signal end-of-data body = pde.response result = yield self.handle_cas_response(request, body, client_redirect_url) - defer.returnValue(result) + return result def handle_cas_response(self, request, cas_response_body, client_redirect_url): user, attributes = self.parse_cas_response(cas_response_body) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index cd711be519..2769f3a189 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -49,7 +49,7 @@ class LogoutRestServlet(RestServlet): requester.user.to_string(), requester.device_id ) - defer.returnValue((200, {})) + return (200, {}) class LogoutAllRestServlet(RestServlet): @@ -75,7 +75,7 @@ class LogoutAllRestServlet(RestServlet): # .. and then delete any access tokens which weren't associated with # devices. 
yield self._auth_handler.delete_access_tokens_for_user(user_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 3e87f0fdb3..1eb1068c98 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -56,7 +56,7 @@ class PresenceStatusRestServlet(RestServlet): state = yield self.presence_handler.get_state(target_user=user) state = format_user_presence_state(state, self.clock.time_msec()) - defer.returnValue((200, state)) + return (200, state) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -88,7 +88,7 @@ class PresenceStatusRestServlet(RestServlet): if self.hs.config.use_presence: yield self.presence_handler.set_state(user, state) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request): return (200, {}) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 4d8ab1f47e..2657ae45bb 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -48,7 +48,7 @@ class ProfileDisplaynameRestServlet(RestServlet): if displayname is not None: ret["displayname"] = displayname - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -61,11 +61,11 @@ class ProfileDisplaynameRestServlet(RestServlet): try: new_name = content["displayname"] except Exception: - defer.returnValue((400, "Unable to parse name")) + return (400, "Unable to parse name") yield self.profile_handler.set_displayname(user, requester, new_name, is_admin) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request, user_id): return (200, {}) @@ -98,7 +98,7 @@ class ProfileAvatarURLRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -110,11 +110,11 @@ class ProfileAvatarURLRestServlet(RestServlet): try: new_name = content["avatar_url"] except Exception: - defer.returnValue((400, "Unable to parse name")) + return (400, "Unable to parse name") yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request, user_id): return (200, {}) @@ -150,7 +150,7 @@ class ProfileRestServlet(RestServlet): if avatar_url is not None: ret["avatar_url"] = avatar_url - defer.returnValue((200, ret)) + return (200, ret) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index e635efb420..c3ae8b98a8 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -69,7 +69,7 @@ class PushRuleRestServlet(RestServlet): if "attr" in spec: yield self.set_rule_attr(user_id, spec, content) self.notify_user(user_id) - defer.returnValue((200, {})) + return (200, {}) if spec["rule_id"].startswith("."): # Rule ids starting with '.' are reserved for server default rules. 
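(One syntactic wrinkle that shows up in later hunks of this patch, e.g. in register.py: when the returned value is itself a yield expression, Python requires parentheses around it, hence the "return (yield ...)" form. A standalone sketch, with fetch() as a hypothetical helper:)

    from twisted.internet import defer

    def fetch():
        # hypothetical helper returning a Deferred
        return defer.succeed({"ok": True})

    @defer.inlineCallbacks
    def handler():
        # A yield expression used as a return operand must be
        # parenthesised: "return yield fetch()" is a syntax error.
        return (yield fetch())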
@@ -106,7 +106,7 @@ class PushRuleRestServlet(RestServlet): except RuleNotFoundException as e: raise SynapseError(400, str(e)) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, path): @@ -123,7 +123,7 @@ class PushRuleRestServlet(RestServlet): try: yield self.store.delete_push_rule(user_id, namespaced_rule_id) self.notify_user(user_id) - defer.returnValue((200, {})) + return (200, {}) except StoreError as e: if e.code == 404: raise NotFoundError() @@ -151,10 +151,10 @@ class PushRuleRestServlet(RestServlet): ) if path[0] == "": - defer.returnValue((200, rules)) + return (200, rules) elif path[0] == "global": result = _filter_ruleset_with_path(rules["global"], path[1:]) - defer.returnValue((200, result)) + return (200, result) else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e9246018df..ebc3dec516 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -62,7 +62,7 @@ class PushersRestServlet(RestServlet): if k not in allowed_keys: del p[k] - defer.returnValue((200, {"pushers": pushers})) + return (200, {"pushers": pushers}) def on_OPTIONS(self, _): return 200, {} @@ -94,7 +94,7 @@ class PushersSetRestServlet(RestServlet): yield self.pusher_pool.remove_pusher( content["app_id"], content["pushkey"], user_id=user.to_string() ) - defer.returnValue((200, {})) + return (200, {}) assert_params_in_dict( content, @@ -143,7 +143,7 @@ class PushersSetRestServlet(RestServlet): self.notifier.on_new_replication_data() - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -190,7 +190,7 @@ class PushersRemoveRestServlet(RestServlet): ) request.write(PushersRemoveRestServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705..012e7a44a6 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -85,7 +85,7 @@ class RoomCreateRestServlet(TransactionRestServlet): requester, self.get_room_config(request) ) - defer.returnValue((200, info)) + return (200, info) def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) @@ -155,9 +155,9 @@ class RoomStateEventRestServlet(TransactionRestServlet): if format == "event": event = format_event_for_client_v2(data.get_dict()) - defer.returnValue((200, event)) + return (200, event) elif format == "content": - defer.returnValue((200, data.get_dict()["content"])) + return (200, data.get_dict()["content"]) @defer.inlineCallbacks def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): @@ -192,7 +192,7 @@ class RoomStateEventRestServlet(TransactionRestServlet): ret = {} if event: ret = {"event_id": event.event_id} - defer.returnValue((200, ret)) + return (200, ret) # TODO: Needs unit testing for generic events + feedback @@ -226,7 +226,7 @@ class RoomSendEventRestServlet(TransactionRestServlet): requester, event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_GET(self, request, room_id, event_type, txn_id): return (200, "Not implemented") @@ -289,7 +289,7 @@ class JoinRoomAliasServlet(TransactionRestServlet): third_party_signed=content.get("third_party_signed", None), ) - defer.returnValue((200, {"room_id": room_id})) + return (200, {"room_id": room_id}) def 
on_PUT(self, request, room_identifier, txn_id): return self.txns.fetch_or_execute_request( @@ -342,7 +342,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): limit=limit, since_token=since_token ) - defer.returnValue((200, data)) + return (200, data) @defer.inlineCallbacks def on_POST(self, request): @@ -387,7 +387,7 @@ class PublicRoomListRestServlet(TransactionRestServlet): network_tuple=network_tuple, ) - defer.returnValue((200, data)) + return (200, data) # TODO: Needs unit testing @@ -438,7 +438,7 @@ class RoomMemberListRestServlet(RestServlet): continue chunk.append(event) - defer.returnValue((200, {"chunk": chunk})) + return (200, {"chunk": chunk}) # deprecated in favour of /members?membership=join? @@ -459,7 +459,7 @@ class JoinedRoomMemberListRestServlet(RestServlet): requester, room_id ) - defer.returnValue((200, {"joined": users_with_profile})) + return (200, {"joined": users_with_profile}) # TODO: Needs better unit testing @@ -492,7 +492,7 @@ class RoomMessageListRestServlet(RestServlet): event_filter=event_filter, ) - defer.returnValue((200, msgs)) + return (200, msgs) # TODO: Needs unit testing @@ -513,7 +513,7 @@ class RoomStateRestServlet(RestServlet): user_id=requester.user.to_string(), is_guest=requester.is_guest, ) - defer.returnValue((200, events)) + return (200, events) # TODO: Needs unit testing @@ -532,7 +532,7 @@ class RoomInitialSyncRestServlet(RestServlet): content = yield self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) - defer.returnValue((200, content)) + return (200, content) class RoomEventServlet(RestServlet): @@ -555,9 +555,9 @@ class RoomEventServlet(RestServlet): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") class RoomEventContextServlet(RestServlet): @@ -607,7 +607,7 @@ class RoomEventContextServlet(RestServlet): results["state"], time_now ) - defer.returnValue((200, results)) + return (200, results) class RoomForgetRestServlet(TransactionRestServlet): @@ -626,7 +626,7 @@ class RoomForgetRestServlet(TransactionRestServlet): yield self.room_member_handler.forget(user=requester.user, room_id=room_id) - defer.returnValue((200, {})) + return (200, {}) def on_PUT(self, request, room_id, txn_id): return self.txns.fetch_or_execute_request( @@ -676,7 +676,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): requester, txn_id, ) - defer.returnValue((200, {})) + return (200, {}) return target = requester.user @@ -703,7 +703,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): if membership_action == "join": return_value["room_id"] = room_id - defer.returnValue((200, return_value)) + return (200, return_value) def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: @@ -745,7 +745,7 @@ class RoomRedactEventRestServlet(TransactionRestServlet): txn_id=txn_id, ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, room_id, event_id, txn_id): return self.txns.fetch_or_execute_request( @@ -790,7 +790,7 @@ class RoomTypingRestServlet(RestServlet): target_user=target_user, auth_user=requester.user, room_id=room_id ) - defer.returnValue((200, {})) + return (200, {}) class SearchRestServlet(RestServlet): @@ -812,7 +812,7 @@ class SearchRestServlet(RestServlet): 
requester.user, content, batch ) - defer.returnValue((200, results)) + return (200, results) class JoinedRoomsRestServlet(RestServlet): @@ -828,7 +828,7 @@ class JoinedRoomsRestServlet(RestServlet): requester = yield self.auth.get_user_by_req(request, allow_guest=True) room_ids = yield self.store.get_rooms_for_user(requester.user.to_string()) - defer.returnValue((200, {"joined_rooms": list(room_ids)})) + return (200, {"joined_rooms": list(room_ids)}) def register_txn_path(servlet, regex_string, http_server, with_get=False): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 41b3171ac8..497cddf8b8 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -60,18 +60,16 @@ class VoipRestServlet(RestServlet): password = turnPassword else: - defer.returnValue((200, {})) + return (200, {}) - defer.returnValue( - ( - 200, - { - "username": username, - "password": password, - "ttl": userLifetime / 1000, - "uris": turnUris, - }, - ) + return ( + 200, + { + "username": username, + "password": password, + "ttl": userLifetime / 1000, + "uris": turnUris, + }, ) def on_OPTIONS(self, request): diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f143d8b85c..7ac456812a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -117,7 +117,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Wrap the session id in a JSON object ret = {"sid": sid} - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def send_password_reset(self, email, client_secret, send_attempt, next_link=None): @@ -149,7 +149,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: # If not, just return a success without sending an email - defer.returnValue(session_id) + return session_id else: # An non-validated session does not exist yet. 
# Generate a session id @@ -185,7 +185,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): token_expires, ) - defer.returnValue(session_id) + return session_id class MsisdnPasswordRequestTokenRestServlet(RestServlet): @@ -221,7 +221,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class PasswordResetSubmitTokenServlet(RestServlet): @@ -279,7 +279,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.setResponseCode(302) request.setHeader("Location", next_link) finish_request(request) - defer.returnValue(None) + return None # Otherwise show the success template html = self.config.email_password_reset_success_html_content @@ -295,7 +295,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): request.write(html.encode("utf-8")) finish_request(request) - defer.returnValue(None) + return None def load_jinja2_template(self, template_dir, template_filename, template_vars): """Loads a jinja2 template with variables to insert @@ -330,7 +330,7 @@ class PasswordResetSubmitTokenServlet(RestServlet): ) response_code = 200 if valid else 400 - defer.returnValue((response_code, {"success": valid})) + return (response_code, {"success": valid}) class PasswordRestServlet(RestServlet): @@ -399,7 +399,7 @@ class PasswordRestServlet(RestServlet): yield self._set_password_handler.set_password(user_id, new_password, requester) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -434,7 +434,7 @@ class DeactivateAccountRestServlet(RestServlet): yield self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase ) - defer.returnValue((200, {})) + return (200, {}) yield self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -447,7 +447,7 @@ class DeactivateAccountRestServlet(RestServlet): else: id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class EmailThreepidRequestTokenRestServlet(RestServlet): @@ -481,7 +481,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -516,7 +516,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class ThreepidRestServlet(RestServlet): @@ -536,7 +536,7 @@ class ThreepidRestServlet(RestServlet): threepids = yield self.datastore.user_get_threepids(requester.user.to_string()) - defer.returnValue((200, {"threepids": threepids})) + return (200, {"threepids": threepids}) @defer.inlineCallbacks def on_POST(self, request): @@ -568,7 +568,7 @@ class ThreepidRestServlet(RestServlet): logger.debug("Binding threepid %s to %s", threepid, user_id) yield self.identity_handler.bind_threepid(threePidCreds, user_id) - defer.returnValue((200, {})) + return (200, {}) class ThreepidDeleteRestServlet(RestServlet): @@ -603,7 +603,7 @@ class ThreepidDeleteRestServlet(RestServlet): else: 
id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class WhoamiRestServlet(RestServlet): @@ -617,7 +617,7 @@ class WhoamiRestServlet(RestServlet): def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - defer.returnValue((200, {"user_id": requester.user.to_string()})) + return (200, {"user_id": requester.user.to_string()}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f155c26259..98f2f6f4b5 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -55,7 +55,7 @@ class AccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, account_data_type): @@ -70,7 +70,7 @@ class AccountDataServlet(RestServlet): if event is None: raise NotFoundError("Account data not found") - defer.returnValue((200, event)) + return (200, event) class RoomAccountDataServlet(RestServlet): @@ -112,7 +112,7 @@ class RoomAccountDataServlet(RestServlet): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, room_id, account_data_type): @@ -127,7 +127,7 @@ class RoomAccountDataServlet(RestServlet): if event is None: raise NotFoundError("Room account data not found") - defer.returnValue((200, event)) + return (200, event) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index d29c10b83d..133c61900a 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -58,7 +58,7 @@ class AccountValidityRenewServlet(RestServlet): ) request.write(AccountValidityRenewServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None class AccountValiditySendMailServlet(RestServlet): @@ -87,7 +87,7 @@ class AccountValiditySendMailServlet(RestServlet): user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bebc2951e7..f21aff39e5 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -207,7 +207,7 @@ class AuthRestServlet(RestServlet): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None elif stagetype == LoginType.TERMS: if ("session" not in request.args or len(request.args["session"])) == 0: raise SynapseError(400, "No session supplied") @@ -239,7 +239,7 @@ class AuthRestServlet(RestServlet): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None else: raise SynapseError(404, "Unknown auth stage type") diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index fc7e2f4dd5..a4fa45fe11 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -58,7 +58,7 @@ class CapabilitiesRestServlet(RestServlet): 
"m.change_password": {"enabled": change_password}, } } - defer.returnValue((200, response)) + return (200, response) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index d279229d74..9adf76cc0c 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -48,7 +48,7 @@ class DevicesRestServlet(RestServlet): devices = yield self.device_handler.get_devices_by_user( requester.user.to_string() ) - defer.returnValue((200, {"devices": devices})) + return (200, {"devices": devices}) class DeleteDevicesRestServlet(RestServlet): @@ -91,7 +91,7 @@ class DeleteDevicesRestServlet(RestServlet): yield self.device_handler.delete_devices( requester.user.to_string(), body["devices"] ) - defer.returnValue((200, {})) + return (200, {}) class DeviceRestServlet(RestServlet): @@ -114,7 +114,7 @@ class DeviceRestServlet(RestServlet): device = yield self.device_handler.get_device( requester.user.to_string(), device_id ) - defer.returnValue((200, device)) + return (200, device) @interactive_auth_handler @defer.inlineCallbacks @@ -137,7 +137,7 @@ class DeviceRestServlet(RestServlet): ) yield self.device_handler.delete_device(requester.user.to_string(), device_id) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_PUT(self, request, device_id): @@ -147,7 +147,7 @@ class DeviceRestServlet(RestServlet): yield self.device_handler.update_device( requester.user.to_string(), device_id, body ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 3f0adf4a21..22be0ee3c5 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -56,7 +56,7 @@ class GetFilterRestServlet(RestServlet): user_localpart=target_user.localpart, filter_id=filter_id ) - defer.returnValue((200, filter.get_filter_json())) + return (200, filter.get_filter_json()) except (KeyError, StoreError): raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND) @@ -89,7 +89,7 @@ class CreateFilterRestServlet(RestServlet): user_localpart=target_user.localpart, user_filter=content ) - defer.returnValue((200, {"filter_id": str(filter_id)})) + return (200, {"filter_id": str(filter_id)}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index a312dd2593..e629c4256d 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -47,7 +47,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, group_description)) + return (200, group_description) @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -59,7 +59,7 @@ class GroupServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, {})) + return (200, {}) class GroupSummaryServlet(RestServlet): @@ -83,7 +83,7 @@ class GroupSummaryServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, get_group_summary)) + return (200, get_group_summary) class GroupSummaryRoomsCatServlet(RestServlet): @@ -120,7 +120,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): @@ -131,7 +131,7 @@ class 
GroupSummaryRoomsCatServlet(RestServlet): group_id, requester_user_id, room_id=room_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupCategoryServlet(RestServlet): @@ -157,7 +157,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): @@ -169,7 +169,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): @@ -180,7 +180,7 @@ class GroupCategoryServlet(RestServlet): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupCategoriesServlet(RestServlet): @@ -204,7 +204,7 @@ class GroupCategoriesServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupRoleServlet(RestServlet): @@ -228,7 +228,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): @@ -240,7 +240,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): @@ -251,7 +251,7 @@ class GroupRoleServlet(RestServlet): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRolesServlet(RestServlet): @@ -275,7 +275,7 @@ class GroupRolesServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupSummaryUsersRoleServlet(RestServlet): @@ -312,7 +312,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id, user_id): @@ -323,7 +323,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): group_id, requester_user_id, user_id=user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRoomServlet(RestServlet): @@ -347,7 +347,7 @@ class GroupRoomServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupUsersServlet(RestServlet): @@ -371,7 +371,7 @@ class GroupUsersServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupInvitedUsersServlet(RestServlet): @@ -395,7 +395,7 @@ class GroupInvitedUsersServlet(RestServlet): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupSettingJoinPolicyServlet(RestServlet): @@ -420,7 +420,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupCreateServlet(RestServlet): @@ -450,7 +450,7 @@ class GroupCreateServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsServlet(RestServlet): @@ -477,7 +477,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id, content ) - 
defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_DELETE(self, request, group_id, room_id): @@ -488,7 +488,7 @@ class GroupAdminRoomsServlet(RestServlet): group_id, requester_user_id, room_id ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsConfigServlet(RestServlet): @@ -516,7 +516,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): group_id, requester_user_id, room_id, config_key, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersInviteServlet(RestServlet): @@ -546,7 +546,7 @@ class GroupAdminUsersInviteServlet(RestServlet): group_id, user_id, requester_user_id, config ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersKickServlet(RestServlet): @@ -573,7 +573,7 @@ class GroupAdminUsersKickServlet(RestServlet): group_id, user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfLeaveServlet(RestServlet): @@ -598,7 +598,7 @@ class GroupSelfLeaveServlet(RestServlet): group_id, requester_user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfJoinServlet(RestServlet): @@ -623,7 +623,7 @@ class GroupSelfJoinServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfAcceptInviteServlet(RestServlet): @@ -648,7 +648,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfUpdatePublicityServlet(RestServlet): @@ -672,7 +672,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): publicise = content["publicise"] yield self.store.update_group_publicity(group_id, requester_user_id, publicise) - defer.returnValue((200, {})) + return (200, {}) class PublicisedGroupsForUserServlet(RestServlet): @@ -694,7 +694,7 @@ class PublicisedGroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_publicised_groups_for_user(user_id) - defer.returnValue((200, result)) + return (200, result) class PublicisedGroupsForUsersServlet(RestServlet): @@ -719,7 +719,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): result = yield self.groups_handler.bulk_get_publicised_groups(user_ids) - defer.returnValue((200, result)) + return (200, result) class GroupsForUserServlet(RestServlet): @@ -741,7 +741,7 @@ class GroupsForUserServlet(RestServlet): result = yield self.groups_handler.get_joined_groups(requester_user_id) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 45c9928b65..6008adec7c 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -95,7 +95,7 @@ class KeyUploadServlet(RestServlet): result = yield self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) - defer.returnValue((200, result)) + return (200, result) class KeyQueryServlet(RestServlet): @@ -149,7 +149,7 @@ class KeyQueryServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.query_devices(body, timeout) - defer.returnValue((200, result)) + return (200, result) class KeyChangesServlet(RestServlet): @@ -189,7 +189,7 @@ class KeyChangesServlet(RestServlet): results = yield 
self.device_handler.get_user_ids_changed(user_id, from_token) - defer.returnValue((200, results)) + return (200, results) class OneTimeKeyServlet(RestServlet): @@ -224,7 +224,7 @@ class OneTimeKeyServlet(RestServlet): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 728a52328f..d034863a3c 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,9 +88,7 @@ class NotificationsServlet(RestServlet): returned_push_actions.append(returned_pa) next_token = str(pa["stream_ordering"]) - defer.returnValue( - (200, {"notifications": returned_push_actions, "next_token": next_token}) - ) + return (200, {"notifications": returned_push_actions, "next_token": next_token}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index b1b5385b09..b4925c0f59 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -83,16 +83,14 @@ class IdTokenServlet(RestServlet): yield self.store.insert_open_id_token(token, ts_valid_until_ms, user_id) - defer.returnValue( - ( - 200, - { - "access_token": token, - "token_type": "Bearer", - "matrix_server_name": self.server_name, - "expires_in": self.EXPIRES_MS / 1000, - }, - ) + return ( + 200, + { + "access_token": token, + "token_type": "Bearer", + "matrix_server_name": self.server_name, + "expires_in": self.EXPIRES_MS / 1000, + }, ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index e75664279b..d93d6a9f24 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -59,7 +59,7 @@ class ReadMarkerRestServlet(RestServlet): event_id=read_marker_event_id, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 488905626a..98a97b7059 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -52,7 +52,7 @@ class ReceiptRestServlet(RestServlet): room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f327999e59..05ea1459e3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -95,7 +95,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -138,7 +138,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): ) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class UsernameAvailabilityRestServlet(RestServlet): @@ -178,7 +178,7 @@ class UsernameAvailabilityRestServlet(RestServlet): yield 
self.registration_handler.check_username(username) - defer.returnValue((200, {"available": True})) + return (200, {"available": True}) class RegisterRestServlet(RestServlet): @@ -230,7 +230,7 @@ class RegisterRestServlet(RestServlet): if kind == b"guest": ret = yield self._do_guest_registration(body, address=client_addr) - defer.returnValue(ret) + return ret return elif kind != b"user": raise UnrecognizedRequestError( @@ -282,7 +282,7 @@ class RegisterRestServlet(RestServlet): result = yield self._do_appservice_registration( desired_username, access_token, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # for either shared secret or regular registration, downcase the @@ -301,7 +301,7 @@ class RegisterRestServlet(RestServlet): result = yield self._do_shared_secret_registration( desired_username, desired_password, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # == Normal User Registration == (everyone else) @@ -500,7 +500,7 @@ class RegisterRestServlet(RestServlet): bind_msisdn=params.get("bind_msisdn"), ) - defer.returnValue((200, return_dict)) + return (200, return_dict) def on_OPTIONS(self, _): return 200, {} @@ -510,7 +510,7 @@ class RegisterRestServlet(RestServlet): user_id = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue((yield self._create_registration_details(user_id, body))) + return (yield self._create_registration_details(user_id, body)) @defer.inlineCallbacks def _do_shared_secret_registration(self, username, password, body): @@ -546,7 +546,7 @@ class RegisterRestServlet(RestServlet): ) result = yield self._create_registration_details(user_id, body) - defer.returnValue(result) + return result @defer.inlineCallbacks def _create_registration_details(self, user_id, params): @@ -570,7 +570,7 @@ class RegisterRestServlet(RestServlet): ) result.update({"access_token": access_token, "device_id": device_id}) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_guest_registration(self, params, address=None): @@ -588,16 +588,14 @@ class RegisterRestServlet(RestServlet): user_id, device_id, initial_display_name, is_guest=True ) - defer.returnValue( - ( - 200, - { - "user_id": user_id, - "device_id": device_id, - "access_token": access_token, - "home_server": self.hs.hostname, - }, - ) + return ( + 200, + { + "user_id": user_id, + "device_id": device_id, + "access_token": access_token, + "home_server": self.hs.hostname, + }, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284..6fde3decdb 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -116,7 +116,7 @@ class RelationSendServlet(RestServlet): requester, event_dict=event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) class RelationPaginationServlet(RestServlet): @@ -196,7 +196,7 @@ class RelationPaginationServlet(RestServlet): return_value["chunk"] = events return_value["original_event"] = original_event - defer.returnValue((200, return_value)) + return (200, return_value) class RelationAggregationPaginationServlet(RestServlet): @@ -268,7 +268,7 @@ class RelationAggregationPaginationServlet(RestServlet): to_token=to_token, ) - defer.returnValue((200, pagination_chunk.to_dict())) + return (200, 
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
index 6e52f6d284..6fde3decdb 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -116,7 +116,7 @@ class RelationSendServlet(RestServlet):
             requester, event_dict=event_dict, txn_id=txn_id
         )

-        defer.returnValue((200, {"event_id": event.event_id}))
+        return (200, {"event_id": event.event_id})


 class RelationPaginationServlet(RestServlet):
@@ -196,7 +196,7 @@ class RelationPaginationServlet(RestServlet):
         return_value["chunk"] = events
         return_value["original_event"] = original_event

-        defer.returnValue((200, return_value))
+        return (200, return_value)


 class RelationAggregationPaginationServlet(RestServlet):
@@ -268,7 +268,7 @@ class RelationAggregationPaginationServlet(RestServlet):
             to_token=to_token,
         )

-        defer.returnValue((200, pagination_chunk.to_dict()))
+        return (200, pagination_chunk.to_dict())


 class RelationAggregationGroupPaginationServlet(RestServlet):
@@ -354,7 +354,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         return_value = result.to_dict()
         return_value["chunk"] = events

-        defer.returnValue((200, return_value))
+        return (200, return_value)


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index e7578af804..3fdd4584a3 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -72,7 +72,7 @@ class ReportEventRestServlet(RestServlet):
             received_ts=self.clock.time_msec(),
         )

-        defer.returnValue((200, {}))
+        return (200, {})


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 8d1b810565..10dec96208 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -135,7 +135,7 @@ class RoomKeysServlet(RestServlet):
             body = {"rooms": {room_id: body}}

         yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
-        defer.returnValue((200, {}))
+        return (200, {})

     @defer.inlineCallbacks
     def on_GET(self, request, room_id, session_id):
@@ -218,7 +218,7 @@ class RoomKeysServlet(RestServlet):
             else:
                 room_keys = room_keys["rooms"][room_id]

-        defer.returnValue((200, room_keys))
+        return (200, room_keys)

     @defer.inlineCallbacks
     def on_DELETE(self, request, room_id, session_id):
@@ -242,7 +242,7 @@ class RoomKeysServlet(RestServlet):
         yield self.e2e_room_keys_handler.delete_room_keys(
             user_id, version, room_id, session_id
         )
-        defer.returnValue((200, {}))
+        return (200, {})


 class RoomKeysNewVersionServlet(RestServlet):
@@ -293,7 +293,7 @@ class RoomKeysNewVersionServlet(RestServlet):
         info = parse_json_object_from_request(request)

         new_version = yield self.e2e_room_keys_handler.create_version(user_id, info)
-        defer.returnValue((200, {"version": new_version}))
+        return (200, {"version": new_version})

     # we deliberately don't have a PUT /version, as these things really should
     # be immutable to avoid people footgunning
@@ -338,7 +338,7 @@ class RoomKeysVersionServlet(RestServlet):
         except SynapseError as e:
             if e.code == 404:
                 raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
-        defer.returnValue((200, info))
+        return (200, info)

     @defer.inlineCallbacks
     def on_DELETE(self, request, version):
@@ -358,7 +358,7 @@ class RoomKeysVersionServlet(RestServlet):
         user_id = requester.user.to_string()

         yield self.e2e_room_keys_handler.delete_version(user_id, version)
-        defer.returnValue((200, {}))
+        return (200, {})

     @defer.inlineCallbacks
     def on_PUT(self, request, version):
@@ -392,7 +392,7 @@ class RoomKeysVersionServlet(RestServlet):
             )

         yield self.e2e_room_keys_handler.update_version(user_id, version, info)
-        defer.returnValue((200, {}))
+        return (200, {})


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index d7f7faa029..14ba61a63e 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -80,7 +80,7 @@ class RoomUpgradeRestServlet(RestServlet):

         ret = {"replacement_room": new_room_id}

-        defer.returnValue((200, ret))
+        return (200, ret)


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 78075b8fc0..2613648d82 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -60,7 +60,7 @@ class SendToDeviceRestServlet(servlet.RestServlet):
         )

         response = (200, {})
-        defer.returnValue(response)
+        return response


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 02d56dee6c..7b32dd2212 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -174,7 +174,7 @@ class SyncRestServlet(RestServlet):
             time_now, sync_result, requester.access_token_id, filter
         )

-        defer.returnValue((200, response_content))
+        return (200, response_content)

     @defer.inlineCallbacks
     def encode_response(self, time_now, sync_result, access_token_id, filter):
@@ -205,27 +205,23 @@ class SyncRestServlet(RestServlet):
             event_formatter,
         )

-        defer.returnValue(
-            {
-                "account_data": {"events": sync_result.account_data},
-                "to_device": {"events": sync_result.to_device},
-                "device_lists": {
-                    "changed": list(sync_result.device_lists.changed),
-                    "left": list(sync_result.device_lists.left),
-                },
-                "presence": SyncRestServlet.encode_presence(
-                    sync_result.presence, time_now
-                ),
-                "rooms": {"join": joined, "invite": invited, "leave": archived},
-                "groups": {
-                    "join": sync_result.groups.join,
-                    "invite": sync_result.groups.invite,
-                    "leave": sync_result.groups.leave,
-                },
-                "device_one_time_keys_count": sync_result.device_one_time_keys_count,
-                "next_batch": sync_result.next_batch.to_string(),
-            }
-        )
+        return {
+            "account_data": {"events": sync_result.account_data},
+            "to_device": {"events": sync_result.to_device},
+            "device_lists": {
+                "changed": list(sync_result.device_lists.changed),
+                "left": list(sync_result.device_lists.left),
+            },
+            "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now),
+            "rooms": {"join": joined, "invite": invited, "leave": archived},
+            "groups": {
+                "join": sync_result.groups.join,
+                "invite": sync_result.groups.invite,
+                "leave": sync_result.groups.leave,
+            },
+            "device_one_time_keys_count": sync_result.device_one_time_keys_count,
+            "next_batch": sync_result.next_batch.to_string(),
+        }

     @staticmethod
     def encode_presence(events, time_now):
@@ -273,7 +269,7 @@ class SyncRestServlet(RestServlet):
             event_formatter=event_formatter,
         )

-        defer.returnValue(joined)
+        return joined

     @defer.inlineCallbacks
     def encode_invited(self, rooms, time_now, token_id, event_formatter):
@@ -309,7 +305,7 @@ class SyncRestServlet(RestServlet):
             invited_state.append(invite)
             invited[room.room_id] = {"invite_state": {"events": invited_state}}

-        defer.returnValue(invited)
+        return invited

     @defer.inlineCallbacks
     def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter):
@@ -342,7 +338,7 @@ class SyncRestServlet(RestServlet):
             event_formatter=event_formatter,
         )

-        defer.returnValue(joined)
+        return joined

     @defer.inlineCallbacks
     def encode_room(
@@ -414,7 +410,7 @@ class SyncRestServlet(RestServlet):
             result["unread_notifications"] = room.unread_notifications
             result["summary"] = room.summary

-        defer.returnValue(result)
+        return result


 def register_servlets(hs, http_server):
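The encode_response hunk above also shows the secondary benefit: a large response dictionary is returned directly instead of being passed to a wrapper call, dropping one level of nesting. The underlying mechanism is plain generator semantics (minimal sketch, standard library only):

    # PEP 380: a generator's `return x` attaches x to the StopIteration that
    # ends it; inlineCallbacks reads that value back as the Deferred's result.
    def gen():
        yield 1
        return "done"

    g = gen()
    assert next(g) == 1
    try:
        next(g)
    except StopIteration as e:
        assert e.value == "done"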
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index 07b6ede603..d173544355 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -45,7 +45,7 @@ class TagListServlet(RestServlet):
         tags = yield self.store.get_tags_for_room(user_id, room_id)

-        defer.returnValue((200, {"tags": tags}))
+        return (200, {"tags": tags})


 class TagServlet(RestServlet):
@@ -76,7 +76,7 @@ class TagServlet(RestServlet):

         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])

-        defer.returnValue((200, {}))
+        return (200, {})

     @defer.inlineCallbacks
     def on_DELETE(self, request, user_id, room_id, tag):
@@ -88,7 +88,7 @@ class TagServlet(RestServlet):

         self.notifier.on_new_event("account_data_key", max_id, users=[user_id])

-        defer.returnValue((200, {}))
+        return (200, {})


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py
index 1e66662a05..158e686b01 100644
--- a/synapse/rest/client/v2_alpha/thirdparty.py
+++ b/synapse/rest/client/v2_alpha/thirdparty.py
@@ -40,7 +40,7 @@ class ThirdPartyProtocolsServlet(RestServlet):
         yield self.auth.get_user_by_req(request, allow_guest=True)

         protocols = yield self.appservice_handler.get_3pe_protocols()
-        defer.returnValue((200, protocols))
+        return (200, protocols)


 class ThirdPartyProtocolServlet(RestServlet):
@@ -60,9 +60,9 @@ class ThirdPartyProtocolServlet(RestServlet):
             only_protocol=protocol
         )
         if protocol in protocols:
-            defer.returnValue((200, protocols[protocol]))
+            return (200, protocols[protocol])
         else:
-            defer.returnValue((404, {"error": "Unknown protocol"}))
+            return (404, {"error": "Unknown protocol"})


 class ThirdPartyUserServlet(RestServlet):
@@ -85,7 +85,7 @@ class ThirdPartyUserServlet(RestServlet):
             ThirdPartyEntityKind.USER, protocol, fields
         )

-        defer.returnValue((200, results))
+        return (200, results)


 class ThirdPartyLocationServlet(RestServlet):
@@ -108,7 +108,7 @@ class ThirdPartyLocationServlet(RestServlet):
             ThirdPartyEntityKind.LOCATION, protocol, fields
         )

-        defer.returnValue((200, results))
+        return (200, results)


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py
index e19fb6d583..7ab2b80e46 100644
--- a/synapse/rest/client/v2_alpha/user_directory.py
+++ b/synapse/rest/client/v2_alpha/user_directory.py
@@ -60,7 +60,7 @@ class UserDirectorySearchRestServlet(RestServlet):
         user_id = requester.user.to_string()

         if not self.hs.config.user_directory_search_enabled:
-            defer.returnValue((200, {"limited": False, "results": []}))
+            return (200, {"limited": False, "results": []})

         body = parse_json_object_from_request(request)

@@ -76,7 +76,7 @@ class UserDirectorySearchRestServlet(RestServlet):
             user_id, search_term, limit
         )

-        defer.returnValue((200, results))
+        return (200, results)


 def register_servlets(hs, http_server):
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index 65afffbb42..92beefa176 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -171,7 +171,7 @@ class MediaRepository(object):

         yield self._generate_thumbnails(None, media_id, media_id, media_type)

-        defer.returnValue("mxc://%s/%s" % (self.server_name, media_id))
+        return "mxc://%s/%s" % (self.server_name, media_id)

     @defer.inlineCallbacks
     def get_local_media(self, request, media_id, name):
@@ -282,7 +282,7 @@ class MediaRepository(object):
                 with responder:
                     pass

-        defer.returnValue(media_info)
+        return media_info

     @defer.inlineCallbacks
     def _get_remote_media_impl(self, server_name, media_id):
@@ -317,14 +317,14 @@ class MediaRepository(object):
         responder = yield self.media_storage.fetch_media(file_info)
         if responder:
-            defer.returnValue((responder, media_info))
+            return (responder, media_info)

         # Failed to find the file anywhere, lets download it.

         media_info = yield self._download_remote_file(server_name, media_id, file_id)

         responder = yield self.media_storage.fetch_media(file_info)
-        defer.returnValue((responder, media_info))
+        return (responder, media_info)

     @defer.inlineCallbacks
     def _download_remote_file(self, server_name, media_id, file_id):
@@ -421,7 +421,7 @@ class MediaRepository(object):

         yield self._generate_thumbnails(server_name, media_id, file_id, media_type)

-        defer.returnValue(media_info)
+        return media_info

     def _get_thumbnail_requirements(self, media_type):
         return self.thumbnail_requirements.get(media_type, ())
@@ -500,7 +500,7 @@ class MediaRepository(object):
             media_id, t_width, t_height, t_type, t_method, t_len
         )

-        defer.returnValue(output_path)
+        return output_path

     @defer.inlineCallbacks
     def generate_remote_exact_thumbnail(
@@ -554,7 +554,7 @@ class MediaRepository(object):
             t_len,
         )

-        defer.returnValue(output_path)
+        return output_path

     @defer.inlineCallbacks
     def _generate_thumbnails(
@@ -667,7 +667,7 @@ class MediaRepository(object):
             media_id, t_width, t_height, t_type, t_method, t_len
         )

-        defer.returnValue({"width": m_width, "height": m_height})
+        return {"width": m_width, "height": m_height}

     @defer.inlineCallbacks
     def delete_old_remote_media(self, before_ts):
@@ -704,7 +704,7 @@ class MediaRepository(object):
                 yield self.store.delete_remote_media(origin, media_id)
                 deleted += 1

-        defer.returnValue({"deleted": deleted})
+        return {"deleted": deleted}


 class MediaRepositoryResource(Resource):
diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py
index 25e5ac2848..3b87717a5a 100644
--- a/synapse/rest/media/v1/media_storage.py
+++ b/synapse/rest/media/v1/media_storage.py
@@ -69,7 +69,7 @@ class MediaStorage(object):
             )

             yield finish_cb()

-        defer.returnValue(fname)
+        return fname

     @contextlib.contextmanager
     def store_into_file(self, file_info):
@@ -143,14 +143,14 @@ class MediaStorage(object):
         path = self._file_info_to_path(file_info)
         local_path = os.path.join(self.local_media_directory, path)
         if os.path.exists(local_path):
-            defer.returnValue(FileResponder(open(local_path, "rb")))
+            return FileResponder(open(local_path, "rb"))

         for provider in self.storage_providers:
             res = yield provider.fetch(path, file_info)
             if res:
-                defer.returnValue(res)
+                return res

-        defer.returnValue(None)
+        return None

     @defer.inlineCallbacks
     def ensure_media_is_in_local_cache(self, file_info):
@@ -166,7 +166,7 @@ class MediaStorage(object):
         path = self._file_info_to_path(file_info)
         local_path = os.path.join(self.local_media_directory, path)
         if os.path.exists(local_path):
-            defer.returnValue(local_path)
+            return local_path

         dirname = os.path.dirname(local_path)
         if not os.path.exists(dirname):
@@ -181,7 +181,7 @@ class MediaStorage(object):
                 )
                 yield res.write_to_consumer(consumer)
                 yield consumer.wait()
-                defer.returnValue(local_path)
+                return local_path

         raise Exception("file could not be found")
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 5871737bfd..bd40891a7f 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -182,7 +182,7 @@ class PreviewUrlResource(DirectServeResource):
             og = cache_result["og"]
             if isinstance(og, six.text_type):
                 og = og.encode("utf8")
-            defer.returnValue(og)
+            return og
             return

         media_info = yield self._download_url(url, user)
@@ -284,7 +284,7 @@ class PreviewUrlResource(DirectServeResource):
             media_info["created_ts"],
         )

-        defer.returnValue(jsonog)
+        return jsonog

     @defer.inlineCallbacks
     def _download_url(self, url, user):
@@ -354,22 +354,20 @@ class PreviewUrlResource(DirectServeResource):
                 # therefore not expire it.
                 raise

-        defer.returnValue(
-            {
-                "media_type": media_type,
-                "media_length": length,
-                "download_name": download_name,
-                "created_ts": time_now_ms,
-                "filesystem_id": file_id,
-                "filename": fname,
-                "uri": uri,
-                "response_code": code,
-                # FIXME: we should calculate a proper expiration based on the
-                # Cache-Control and Expire headers. But for now, assume 1 hour.
-                "expires": 60 * 60 * 1000,
-                "etag": headers["ETag"][0] if "ETag" in headers else None,
-            }
-        )
+        return {
+            "media_type": media_type,
+            "media_length": length,
+            "download_name": download_name,
+            "created_ts": time_now_ms,
+            "filesystem_id": file_id,
+            "filename": fname,
+            "uri": uri,
+            "response_code": code,
+            # FIXME: we should calculate a proper expiration based on the
+            # Cache-Control and Expire headers. But for now, assume 1 hour.
+            "expires": 60 * 60 * 1000,
+            "etag": headers["ETag"][0] if "ETag" in headers else None,
+        }

     def _start_expire_url_cache_data(self):
         return run_as_background_process(
diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py
index f183743f31..729c097e6d 100644
--- a/synapse/server_notices/resource_limits_server_notices.py
+++ b/synapse/server_notices/resource_limits_server_notices.py
@@ -193,4 +193,4 @@ class ResourceLimitsServerNotices(object):
             if event_id in referenced_events:
                 referenced_events.remove(event.event_id)

-        defer.returnValue((currently_blocked, referenced_events))
+        return (currently_blocked, referenced_events)
diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py
index 71e7e75320..2dac90578c 100644
--- a/synapse/server_notices/server_notices_manager.py
+++ b/synapse/server_notices/server_notices_manager.py
@@ -86,7 +86,7 @@ class ServerNoticesManager(object):
         res = yield self._event_creation_handler.create_and_send_nonmember_event(
             requester, event_dict, ratelimit=False
         )
-        defer.returnValue(res)
+        return res

     @cachedInlineCallbacks()
     def get_notice_room_for_user(self, user_id):
@@ -120,7 +120,7 @@ class ServerNoticesManager(object):
                 # we found a room which our user shares with the system notice
                 # user
                 logger.info("Using room %s", room.room_id)
-                defer.returnValue(room.room_id)
+                return room.room_id

         # apparently no existing notice room: create a new one
         logger.info("Creating server notices room for %s", user_id)
@@ -158,4 +158,4 @@ class ServerNoticesManager(object):
         self._notifier.on_new_event("account_data_key", max_id, users=[user_id])

         logger.info("Created server notices room %s for %s", room_id, user_id)
-        defer.returnValue(room_id)
+        return room_id
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 9f708fa205..a0d34f16ea 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -135,7 +135,7 @@ class StateHandler(object):
             event = None
             if event_id:
                 event = yield self.store.get_event(event_id, allow_none=True)
-            defer.returnValue(event)
+            return event
             return

         state_map = yield self.store.get_events(
@@ -145,7 +145,7 @@ class StateHandler(object):
             key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map
         }

-        defer.returnValue(state)
+        return state

     @defer.inlineCallbacks
     def get_current_state_ids(self, room_id, latest_event_ids=None):
@@ -169,7 +169,7 @@ class StateHandler(object):
         ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         state = ret.state

-        defer.returnValue(state)
+        return state

     @defer.inlineCallbacks
     def get_current_users_in_room(self, room_id, latest_event_ids=None):
@@ -189,7 +189,7 @@ class StateHandler(object):
         logger.debug("calling resolve_state_groups from get_current_users_in_room")
         entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         joined_users = yield self.store.get_joined_users_from_state(room_id, entry)
-        defer.returnValue(joined_users)
+        return joined_users

     @defer.inlineCallbacks
     def get_current_hosts_in_room(self, room_id, latest_event_ids=None):
@@ -198,7 +198,7 @@ class StateHandler(object):
         logger.debug("calling resolve_state_groups from get_current_hosts_in_room")
         entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids)
         joined_hosts = yield self.store.get_joined_hosts(room_id, entry)
-        defer.returnValue(joined_hosts)
+        return joined_hosts

     @defer.inlineCallbacks
     def compute_event_context(self, event, old_state=None):
@@ -241,7 +241,7 @@ class StateHandler(object):
                 prev_state_ids=prev_state_ids,
             )

-            defer.returnValue(context)
+            return context

         if old_state:
             # We already have the state, so we don't need to calculate it.
@@ -275,7 +275,7 @@ class StateHandler(object):
                 prev_state_ids=prev_state_ids,
             )

-            defer.returnValue(context)
+            return context

         logger.debug("calling resolve_state_groups from compute_event_context")
@@ -343,7 +343,7 @@ class StateHandler(object):
             delta_ids=delta_ids,
         )

-        defer.returnValue(context)
+        return context

     @defer.inlineCallbacks
     def resolve_state_groups_for_events(self, room_id, event_ids):
@@ -368,19 +368,17 @@ class StateHandler(object):
         state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids)

         if len(state_groups_ids) == 0:
-            defer.returnValue(_StateCacheEntry(state={}, state_group=None))
+            return _StateCacheEntry(state={}, state_group=None)
         elif len(state_groups_ids) == 1:
             name, state_list = list(state_groups_ids.items()).pop()

             prev_group, delta_ids = yield self.store.get_state_group_delta(name)

-            defer.returnValue(
-                _StateCacheEntry(
-                    state=state_list,
-                    state_group=name,
-                    prev_group=prev_group,
-                    delta_ids=delta_ids,
-                )
+            return _StateCacheEntry(
+                state=state_list,
+                state_group=name,
+                prev_group=prev_group,
+                delta_ids=delta_ids,
             )

         room_version = yield self.store.get_room_version(room_id)
@@ -392,7 +390,7 @@ class StateHandler(object):
             None,
             state_res_store=StateResolutionStore(self.store),
         )
-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def resolve_events(self, room_version, state_sets, event):
@@ -415,7 +413,7 @@ class StateHandler(object):

         new_state = {key: state_map[ev_id] for key, ev_id in iteritems(new_state)}

-        defer.returnValue(new_state)
+        return new_state


 class StateResolutionHandler(object):
@@ -479,7 +477,7 @@ class StateResolutionHandler(object):
             if self._state_cache is not None:
                 cache = self._state_cache.get(group_names, None)
                 if cache:
-                    defer.returnValue(cache)
+                    return cache

             logger.info(
                 "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
@@ -525,7 +523,7 @@ class StateResolutionHandler(object):
             if self._state_cache is not None:
                 self._state_cache[group_names] = cache

-            defer.returnValue(cache)
+            return cache


 def _make_state_cache_entry(new_state, state_groups_ids):
diff --git a/synapse/state/v1.py b/synapse/state/v1.py
index 88acd4817e..a2f92d9ff9 100644
--- a/synapse/state/v1.py
+++ b/synapse/state/v1.py
@@ -55,7 +55,7 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
         a map from (type, state_key) to event_id.
     """
     if len(state_sets) == 1:
-        defer.returnValue(state_sets[0])
+        return state_sets[0]

     unconflicted_state, conflicted_state = _seperate(state_sets)

@@ -97,10 +97,8 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory):
     state_map_new = yield state_map_factory(new_needed_events)
     state_map.update(state_map_new)

-    defer.returnValue(
-        _resolve_with_state(
-            unconflicted_state, conflicted_state, auth_events, state_map
-        )
+    return _resolve_with_state(
+        unconflicted_state, conflicted_state, auth_events, state_map
     )

diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index db969e8997..b327c86f40 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -63,7 +63,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto
     unconflicted_state, conflicted_state = _seperate(state_sets)

     if not conflicted_state:
-        defer.returnValue(unconflicted_state)
+        return unconflicted_state

     logger.debug("%d conflicted state entries", len(conflicted_state))
     logger.debug("Calculating auth chain difference")
@@ -137,7 +137,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto

     logger.debug("done")

-    defer.returnValue(resolved_state)
+    return resolved_state


 @defer.inlineCallbacks
@@ -168,18 +168,18 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store):
             aev = yield _get_event(aid, event_map, state_res_store)
             if (aev.type, aev.state_key) == (EventTypes.Create, ""):
                 if aev.content.get("creator") == event.sender:
-                    defer.returnValue(100)
+                    return 100
                 break
-        defer.returnValue(0)
+        return 0

     level = pl.content.get("users", {}).get(event.sender)
     if level is None:
         level = pl.content.get("users_default", 0)

     if level is None:
-        defer.returnValue(0)
+        return 0
     else:
-        defer.returnValue(int(level))
+        return int(level)


 @defer.inlineCallbacks
@@ -224,7 +224,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store):
     intersection = set(auth_sets[0]).intersection(*auth_sets[1:])
     union = set().union(*auth_sets)

-    defer.returnValue(union - intersection)
+    return union - intersection


 def _seperate(state_sets):
@@ -343,7 +343,7 @@ def _reverse_topological_power_sort(event_ids, event_map, state_res_store, auth_
     it = lexicographical_topological_sort(graph, key=_get_power_order)
     sorted_events = list(it)

-    defer.returnValue(sorted_events)
+    return sorted_events


 @defer.inlineCallbacks
@@ -396,7 +396,7 @@ def _iterative_auth_checks(
         except AuthError:
             pass

-    defer.returnValue(resolved_state)
+    return resolved_state


 @defer.inlineCallbacks
@@ -439,7 +439,7 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor

     event_ids.sort(key=lambda ev_id: order_map[ev_id])

-    defer.returnValue(event_ids)
+    return event_ids


 @defer.inlineCallbacks
@@ -462,7 +462,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
     while event:
         depth = mainline_map.get(event.event_id)
         if depth is not None:
-            defer.returnValue(depth)
+            return depth

         auth_events = event.auth_event_ids()
         event = None
@@ -474,7 +474,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor
             break

     # Didn't find a power level auth event, so we just return 0
-    defer.returnValue(0)
+    return 0


 @defer.inlineCallbacks
@@ -493,7 +493,7 @@ def _get_event(event_id, event_map, state_res_store):
     if event_id not in event_map:
         events = yield state_res_store.get_events([event_id], allow_rejected=True)
         event_map.update(events)

-    defer.returnValue(event_map[event_id])
+    return event_map[event_id]


 def lexicographical_topological_sort(graph, key):
diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py
index 86a333a919..e7f6ea7286 100644
--- a/synapse/storage/__init__.py
+++ b/synapse/storage/__init__.py
@@ -498,7 +498,7 @@ class DataStore(
         )
         count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn)
         retval = {"users": users, "total": count}
-        defer.returnValue(retval)
+        return retval

     def search_users(self, term):
         """Function to search users list for one or more users with
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index a7c93efa46..489ce82fae 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -513,7 +513,7 @@ class SQLBaseStore(object):
                 after_callback(*after_args, **after_kwargs)
             raise

-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def runWithConnection(self, func, *args, **kwargs):
@@ -553,7 +553,7 @@ class SQLBaseStore(object):
         with PreserveLoggingContext():
             result = yield self._db_pool.runWithConnection(inner_func, *args, **kwargs)

-        defer.returnValue(result)
+        return result

     @staticmethod
     def cursor_to_dict(cursor):
@@ -615,8 +615,8 @@ class SQLBaseStore(object):
             # a cursor after we receive an error from the db.
             if not or_ignore:
                 raise
-            defer.returnValue(False)
-        defer.returnValue(True)
+            return False
+        return True

     @staticmethod
     def _simple_insert_txn(txn, table, values):
@@ -708,7 +708,7 @@ class SQLBaseStore(object):
                     insertion_values,
                     lock=lock,
                 )
-                defer.returnValue(result)
+                return result
             except self.database_engine.module.IntegrityError as e:
                 attempts += 1
                 if attempts >= 5:
@@ -1121,7 +1121,7 @@ class SQLBaseStore(object):
         results = []

         if not iterable:
-            defer.returnValue(results)
+            return results

         # iterables can not be sliced, so convert it to a list first
         it_list = list(iterable)
@@ -1142,7 +1142,7 @@ class SQLBaseStore(object):

             results.extend(rows)

-        defer.returnValue(results)
+        return results

     @classmethod
     def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols):
diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py
index 8394389073..9fa5b4f3d6 100644
--- a/synapse/storage/account_data.py
+++ b/synapse/storage/account_data.py
@@ -111,9 +111,9 @@ class AccountDataWorkerStore(SQLBaseStore):
         )

         if result:
-            defer.returnValue(json.loads(result))
+            return json.loads(result)
         else:
-            defer.returnValue(None)
+            return None

     @cached(num_args=2)
     def get_account_data_for_room(self, user_id, room_id):
@@ -264,11 +264,9 @@ class AccountDataWorkerStore(SQLBaseStore):
             on_invalidate=cache_context.invalidate,
         )
         if not ignored_account_data:
-            defer.returnValue(False)
+            return False

-        defer.returnValue(
-            ignored_user_id in ignored_account_data.get("ignored_users", {})
-        )
+        return ignored_user_id in ignored_account_data.get("ignored_users", {})


 class AccountDataStore(AccountDataWorkerStore):
@@ -332,7 +330,7 @@ class AccountDataStore(AccountDataWorkerStore):
         )

         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def add_account_data_for_user(self, user_id, account_data_type, content):
@@ -373,7 +371,7 @@ class AccountDataStore(AccountDataWorkerStore):
         )

         result = self._account_data_id_gen.get_current_token()
-        defer.returnValue(result)
+        return result

     def _update_max_stream_id(self, next_id):
         """Update the max stream_id
diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py
index eb329ebd8b..05d9c05c3f 100644
--- a/synapse/storage/appservice.py
+++ b/synapse/storage/appservice.py
@@ -145,7 +145,7 @@ class ApplicationServiceTransactionWorkerStore(
         for service in as_list:
             if service.id == res["as_id"]:
                 services.append(service)
-        defer.returnValue(services)
+        return services

     @defer.inlineCallbacks
     def get_appservice_state(self, service):
@@ -164,9 +164,9 @@ class ApplicationServiceTransactionWorkerStore(
             desc="get_appservice_state",
         )
         if result:
-            defer.returnValue(result.get("state"))
+            return result.get("state")
             return
-        defer.returnValue(None)
+        return None

     def set_appservice_state(self, service, state):
         """Set the application service state.
@@ -298,15 +298,13 @@ class ApplicationServiceTransactionWorkerStore(
         )

         if not entry:
-            defer.returnValue(None)
+            return None

         event_ids = json.loads(entry["event_ids"])

         events = yield self.get_events_as_list(event_ids)

-        defer.returnValue(
-            AppServiceTransaction(service=service, id=entry["txn_id"], events=events)
-        )
+        return AppServiceTransaction(service=service, id=entry["txn_id"], events=events)

     def _get_last_txn(self, txn, service_id):
         txn.execute(
@@ -360,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore(

         events = yield self.get_events_as_list(event_ids)

-        defer.returnValue((upper_bound, events))
+        return (upper_bound, events)


 class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore):
diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py
index 50f913a414..e5f0668f09 100644
--- a/synapse/storage/background_updates.py
+++ b/synapse/storage/background_updates.py
@@ -115,7 +115,7 @@ class BackgroundUpdateStore(SQLBaseStore):
                     " Unscheduling background update task."
                 )
                 self._all_done = True
-                defer.returnValue(None)
+                return None

     @defer.inlineCallbacks
     def has_completed_background_updates(self):
@@ -127,11 +127,11 @@ class BackgroundUpdateStore(SQLBaseStore):
         # if we've previously determined that there is nothing left to do, that
         # is easy
         if self._all_done:
-            defer.returnValue(True)
+            return True

         # obviously, if we have things in our queue, we're not done.
         if self._background_update_queue:
-            defer.returnValue(False)
+            return False

         # otherwise, check if there are updates to be run. This is important,
         # as we may be running on a worker which doesn't perform the bg updates
@@ -144,9 +144,9 @@ class BackgroundUpdateStore(SQLBaseStore):
         )
         if not updates:
             self._all_done = True
-            defer.returnValue(True)
+            return True

-        defer.returnValue(False)
+        return False

     @defer.inlineCallbacks
     def do_next_background_update(self, desired_duration_ms):
@@ -173,14 +173,14 @@ class BackgroundUpdateStore(SQLBaseStore):

         if not self._background_update_queue:
             # no work left to do
-            defer.returnValue(None)
+            return None

         # pop from the front, and add back to the back
         update_name = self._background_update_queue.pop(0)
         self._background_update_queue.append(update_name)

         res = yield self._do_background_update(update_name, desired_duration_ms)
-        defer.returnValue(res)
+        return res

     @defer.inlineCallbacks
     def _do_background_update(self, update_name, desired_duration_ms):
@@ -231,7 +231,7 @@ class BackgroundUpdateStore(SQLBaseStore):

         performance.update(items_updated, duration_ms)

-        defer.returnValue(len(self._background_update_performance))
+        return len(self._background_update_performance)

     def register_background_update_handler(self, update_name, update_handler):
         """Register a handler for doing a background update.
@@ -266,7 +266,7 @@ class BackgroundUpdateStore(SQLBaseStore):
         @defer.inlineCallbacks
         def noop_update(progress, batch_size):
             yield self._end_background_update(update_name)
-            defer.returnValue(1)
+            return 1

         self.register_background_update_handler(update_name, noop_update)
@@ -370,7 +370,7 @@ class BackgroundUpdateStore(SQLBaseStore):
             logger.info("Adding index %s to %s", index_name, table)
             yield self.runWithConnection(runner)
             yield self._end_background_update(update_name)
-            defer.returnValue(1)
+            return 1

         self.register_background_update_handler(update_name, updater)
diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py
index bda68de5be..6db8c54077 100644
--- a/synapse/storage/client_ips.py
+++ b/synapse/storage/client_ips.py
@@ -104,7 +104,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         yield self.runWithConnection(f)
         yield self._end_background_update("user_ips_drop_nonunique_index")
-        defer.returnValue(1)
+        return 1

     @defer.inlineCallbacks
     def _analyze_user_ip(self, progress, batch_size):
@@ -121,7 +121,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):

         yield self._end_background_update("user_ips_analyze")

-        defer.returnValue(1)
+        return 1

     @defer.inlineCallbacks
     def _remove_user_ip_dupes(self, progress, batch_size):
@@ -291,7 +291,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
         if last:
             yield self._end_background_update("user_ips_remove_dupes")

-        defer.returnValue(batch_size)
+        return batch_size

     @defer.inlineCallbacks
     def insert_client_ip(
@@ -401,7 +401,7 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
                 "device_id": did,
                 "last_seen": last_seen,
             }
-        defer.returnValue(ret)
+        return ret

     @classmethod
     def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols):
@@ -461,14 +461,12 @@ class ClientIpStore(background_updates.BackgroundUpdateStore):
             ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"]))
             for row in rows
         )
-        defer.returnValue(
-            list(
-                {
-                    "access_token": access_token,
-                    "ip": ip,
-                    "user_agent": user_agent,
-                    "last_seen": last_seen,
-                }
-                for (access_token, ip), (user_agent, last_seen) in iteritems(results)
-            )
+        return list(
+            {
+                "access_token": access_token,
+                "ip": ip,
+                "user_agent": user_agent,
+                "last_seen": last_seen,
+            }
+            for (access_token, ip), (user_agent, last_seen) in iteritems(results)
         )
diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py
index 4ea0deea4f..79bb0ea46d 100644
--- a/synapse/storage/deviceinbox.py
+++ b/synapse/storage/deviceinbox.py
@@ -92,7 +92,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             user_id, last_deleted_stream_id
         )
         if not has_changed:
-            defer.returnValue(0)
+            return 0

         def delete_messages_for_device_txn(txn):
             sql = (
@@ -115,7 +115,7 @@ class DeviceInboxWorkerStore(SQLBaseStore):
             last_deleted_stream_id, up_to_stream_id
         )

-        defer.returnValue(count)
+        return count

     def get_new_device_msgs_for_remote(
         self, destination, last_stream_id, current_stream_id, limit
@@ -263,7 +263,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
                 destination, stream_id
             )

-        defer.returnValue(self._device_inbox_id_gen.get_current_token())
+        return self._device_inbox_id_gen.get_current_token()

     @defer.inlineCallbacks
     def add_messages_from_remote_to_device_inbox(
@@ -312,7 +312,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):
         for user_id in local_messages_by_user_then_device.keys():
             self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
-        defer.returnValue(stream_id)
+        return stream_id

     def _add_messages_to_local_device_inbox_txn(
         self, txn, stream_id, messages_by_user_then_device
@@ -426,4 +426,4 @@ class DeviceInboxStore(DeviceInboxWorkerStore, BackgroundUpdateStore):

         yield self._end_background_update(self.DEVICE_INBOX_STREAM_ID)

-        defer.returnValue(1)
+        return 1
diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py
index d2b113a4e7..8f72d92895 100644
--- a/synapse/storage/devices.py
+++ b/synapse/storage/devices.py
@@ -71,7 +71,7 @@ class DeviceWorkerStore(SQLBaseStore):
             desc="get_devices_by_user",
         )

-        defer.returnValue({d["device_id"]: d for d in devices})
+        return {d["device_id"]: d for d in devices}

     @defer.inlineCallbacks
     def get_devices_by_remote(self, destination, from_stream_id, limit):
@@ -88,7 +88,7 @@ class DeviceWorkerStore(SQLBaseStore):
             destination, int(from_stream_id)
         )
         if not has_changed:
-            defer.returnValue((now_stream_id, []))
+            return (now_stream_id, [])

         # We retrieve n+1 devices from the list of outbound pokes where n is
         # our outbound device update limit. We then check if the very last
@@ -111,7 +111,7 @@ class DeviceWorkerStore(SQLBaseStore):

         # Return an empty list if there are no updates
         if not updates:
-            defer.returnValue((now_stream_id, []))
+            return (now_stream_id, [])

         # if we have exceeded the limit, we need to exclude any results with the
         # same stream_id as the last row.
@@ -147,13 +147,13 @@ class DeviceWorkerStore(SQLBaseStore):
         # skip that stream_id and return an empty list, and continue with the next
         # stream_id next time.
         if not query_map:
-            defer.returnValue((stream_id_cutoff, []))
+            return (stream_id_cutoff, [])

         results = yield self._get_device_update_edus_by_remote(
             destination, from_stream_id, query_map
         )

-        defer.returnValue((now_stream_id, results))
+        return (now_stream_id, results)

     def _get_devices_by_remote_txn(
         self, txn, destination, from_stream_id, now_stream_id, limit
@@ -232,7 +232,7 @@ class DeviceWorkerStore(SQLBaseStore):

             results.append(result)

-        defer.returnValue(results)
+        return results

     def _get_last_device_update_for_remote_user(
         self, destination, user_id, from_stream_id
@@ -330,7 +330,7 @@ class DeviceWorkerStore(SQLBaseStore):
             else:
                 results[user_id] = yield self._get_cached_devices_for_user(user_id)

-        defer.returnValue((user_ids_not_in_cache, results))
+        return (user_ids_not_in_cache, results)

     @cachedInlineCallbacks(num_args=2, tree=True)
     def _get_cached_user_device(self, user_id, device_id):
@@ -340,7 +340,7 @@ class DeviceWorkerStore(SQLBaseStore):
             retcol="content",
             desc="_get_cached_user_device",
         )
-        defer.returnValue(db_to_json(content))
+        return db_to_json(content)

     @cachedInlineCallbacks()
     def _get_cached_devices_for_user(self, user_id):
@@ -350,9 +350,9 @@ class DeviceWorkerStore(SQLBaseStore):
             retcols=("device_id", "content"),
             desc="_get_cached_devices_for_user",
         )
-        defer.returnValue(
-            {device["device_id"]: db_to_json(device["content"]) for device in devices}
-        )
+        return {
+            device["device_id"]: db_to_json(device["content"]) for device in devices
+        }

     def get_devices_with_keys_by_user(self, user_id):
         """Get all devices (with any device keys) for a user
@@ -482,7 +482,7 @@ class DeviceWorkerStore(SQLBaseStore):
         results = {user_id: None for user_id in user_ids}
         results.update({row["user_id"]: row["stream_id"] for row in rows})

-        defer.returnValue(results)
+        return results


 class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
@@ -543,7 +543,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
         """
         key = (user_id, device_id)
         if self.device_id_exists_cache.get(key, None):
-            defer.returnValue(False)
+            return False

         try:
             inserted = yield self._simple_insert(
@@ -557,7 +557,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
                 or_ignore=True,
             )
             self.device_id_exists_cache.prefill(key, True)
-            defer.returnValue(inserted)
+            return inserted
         except Exception as e:
             logger.error(
                 "store_device with device_id=%s(%r) user_id=%s(%r)"
@@ -780,7 +780,7 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
                 hosts,
                 stream_id,
             )
-        defer.returnValue(stream_id)
+        return stream_id

     def _add_device_change_txn(self, txn, user_id, device_ids, hosts, stream_id):
         now = self._clock.time_msec()
@@ -889,4 +889,4 @@ class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore):
         yield self.runWithConnection(f)
         yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES)

-        defer.returnValue(1)
+        return 1
diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py
index 201bbd430c..e966a73f3d 100644
--- a/synapse/storage/directory.py
+++ b/synapse/storage/directory.py
@@ -46,7 +46,7 @@ class DirectoryWorkerStore(SQLBaseStore):
         )

         if not room_id:
-            defer.returnValue(None)
+            return None
             return

         servers = yield self._simple_select_onecol(
@@ -57,10 +57,10 @@ class DirectoryWorkerStore(SQLBaseStore):
         )

         if not servers:
-            defer.returnValue(None)
+            return None
             return

-        defer.returnValue(RoomAliasMapping(room_id, room_alias.to_string(), servers))
+        return RoomAliasMapping(room_id, room_alias.to_string(), servers)

     def get_room_alias_creator(self, room_alias):
         return self._simple_select_one_onecol(
@@ -125,7 +125,7 @@ class DirectoryStore(DirectoryWorkerStore):
             raise SynapseError(
                 409, "Room alias %s already exists" % room_alias.to_string()
             )
-        defer.returnValue(ret)
+        return ret

     @defer.inlineCallbacks
     def delete_room_alias(self, room_alias):
@@ -133,7 +133,7 @@ class DirectoryStore(DirectoryWorkerStore):
             "delete_room_alias", self._delete_room_alias_txn, room_alias
         )

-        defer.returnValue(room_id)
+        return room_id

     def _delete_room_alias_txn(self, txn, room_alias):
         txn.execute(
diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py
index f40ef2ab64..99128f2df7 100644
--- a/synapse/storage/e2e_room_keys.py
+++ b/synapse/storage/e2e_room_keys.py
@@ -61,7 +61,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):

         row["session_data"] = json.loads(row["session_data"])

-        defer.returnValue(row)
+        return row

     @defer.inlineCallbacks
     def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key):
@@ -118,7 +118,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
         try:
             version = int(version)
         except ValueError:
-            defer.returnValue({"rooms": {}})
+            return {"rooms": {}}

         keyvalues = {"user_id": user_id, "version": version}
         if room_id:
@@ -151,7 +151,7 @@ class EndToEndRoomKeyStore(SQLBaseStore):
                 "session_data": json.loads(row["session_data"]),
             }

-        defer.returnValue(sessions)
+        return sessions

     @defer.inlineCallbacks
     def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None):
diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index 2fabb9e2cb..1e07474e70 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -41,7 +41,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore):
             dict containing "key_json", "device_display_name".
""" if not query_list: - defer.returnValue({}) + return {} results = yield self.runInteraction( "get_e2e_device_keys", @@ -55,7 +55,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): for device_id, device_info in iteritems(device_keys): device_info["keys"] = db_to_json(device_info.pop("key_json")) - defer.returnValue(results) + return results def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False @@ -130,9 +130,7 @@ class EndToEndKeyWorkerStore(SQLBaseStore): desc="add_e2e_one_time_keys_check", ) - defer.returnValue( - {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} - ) + return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} @defer.inlineCallbacks def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index cb4478342f..4f500d893e 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -131,9 +131,9 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas ) if not rows: - defer.returnValue(0) + return 0 else: - defer.returnValue(max(row["depth"] for row in rows)) + return max(row["depth"] for row in rows) def _get_oldest_events_in_room_txn(self, txn, room_id): return self._simple_select_onecol_txn( @@ -169,7 +169,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas # make sure that we don't completely ignore the older events. res = res[0:5] + random.sample(res[5:], 5) - defer.returnValue(res) + return res def get_latest_event_ids_and_hashes_in_room(self, room_id): """ @@ -411,7 +411,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas limit, ) events = yield self.get_events_as_list(ids) - defer.returnValue(events) + return events def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit): @@ -463,7 +463,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas desc="get_successor_events", ) - defer.returnValue([row["event_id"] for row in rows]) + return [row["event_id"] for row in rows] class EventFederationStore(EventFederationWorkerStore): @@ -654,4 +654,4 @@ class EventFederationStore(EventFederationWorkerStore): if not result: yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY) - defer.returnValue(batch_size) + return batch_size diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index dcfb67e029..22025effbc 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -100,7 +100,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): user_id, last_read_event_id, ) - defer.returnValue(ret) + return ret def _get_unread_counts_by_receipt_txn( self, txn, room_id, user_id, last_read_event_id @@ -178,7 +178,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): return [r[0] for r in txn] ret = yield self.runInteraction("get_push_action_users_in_range", f) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range_for_http( @@ -279,7 +279,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): # Take only up to the limit. We have to stop at the limit because # one of the subqueries may have hit the limit. 
-        defer.returnValue(notifs[:limit])
+        return notifs[:limit]

     @defer.inlineCallbacks
     def get_unread_push_actions_for_user_in_range_for_email(
@@ -380,7 +380,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
         notifs.sort(key=lambda r: -(r["received_ts"] or 0))

         # Now return the first `limit`
-        defer.returnValue(notifs[:limit])
+        return notifs[:limit]

     def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering):
         """A fast check to see if there might be something to push for the
@@ -477,7 +477,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
                 keyvalues={"event_id": event_id},
                 desc="remove_push_actions_from_staging",
             )
-            defer.returnValue(res)
+            return res
         except Exception:
             # this method is called from an exception handler, so propagating
             # another exception here really isn't helpful - there's nothing
@@ -732,7 +732,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
         push_actions = yield self.runInteraction("get_push_actions_for_user", f)
         for pa in push_actions:
             pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"])
-        defer.returnValue(push_actions)
+        return push_actions

     @defer.inlineCallbacks
     def get_time_of_last_push_action_before(self, stream_ordering):
@@ -749,7 +749,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             return txn.fetchone()

         result = yield self.runInteraction("get_time_of_last_push_action_before", f)
-        defer.returnValue(result[0] if result else None)
+        return result[0] if result else None

     @defer.inlineCallbacks
     def get_latest_push_action_stream_ordering(self):
@@ -758,7 +758,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
             return txn.fetchone()

         result = yield self.runInteraction("get_latest_push_action_stream_ordering", f)
-        defer.returnValue(result[0] or 0)
+        return result[0] or 0

     def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id):
         # Sad that we have to blow away the cache for the whole room here
diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index b70457bfc6..88c0180116 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -223,7 +223,7 @@ def _retry_on_integrity_error(func):
         except self.database_engine.module.IntegrityError:
             logger.exception("IntegrityError, retrying.")
             res = yield func(self, *args, delete_existing=True, **kwargs)
-        defer.returnValue(res)
+        return res

     return f

@@ -309,7 +309,7 @@ class EventsStore(

         max_persisted_id = yield self._stream_id_gen.get_current_token()

-        defer.returnValue(max_persisted_id)
+        return max_persisted_id

     @defer.inlineCallbacks
     @log_function
@@ -334,7 +334,7 @@ class EventsStore(
         yield make_deferred_yieldable(deferred)

         max_persisted_id = yield self._stream_id_gen.get_current_token()
-        defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
+        return (event.internal_metadata.stream_ordering, max_persisted_id)

     def _maybe_start_persisting(self, room_id):
         @defer.inlineCallbacks
@@ -595,7 +595,7 @@ class EventsStore(
                 stale = latest_event_ids & result
                 stale_forward_extremities_counter.observe(len(stale))

-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def _get_events_which_are_prevs(self, event_ids):
@@ -633,7 +633,7 @@ class EventsStore(
                 "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
             )

-        defer.returnValue(results)
+        return results

     @defer.inlineCallbacks
     def _get_prevs_before_rejected(self, event_ids):
@@ -695,7 +695,7 @@ class EventsStore(
                 "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
             )

-        defer.returnValue(existing_prevs)
+        return existing_prevs

     @defer.inlineCallbacks
     def _get_new_state_after_events(
@@ -796,7 +796,7 @@ class EventsStore(
             # If they old and new groups are the same then we don't need to do
             # anything.
             if old_state_groups == new_state_groups:
-                defer.returnValue((None, None))
+                return (None, None)

             if len(new_state_groups) == 1 and len(old_state_groups) == 1:
                 # If we're going from one state group to another, lets check if
@@ -813,7 +813,7 @@ class EventsStore(
                     # the current state in memory then lets also return that,
                     # but it doesn't matter if we don't.
                     new_state = state_groups_map.get(new_state_group)
-                    defer.returnValue((new_state, delta_ids))
+                    return (new_state, delta_ids)

             # Now that we have calculated new_state_groups we need to get
             # their state IDs so we can resolve to a single state set.
@@ -825,7 +825,7 @@ class EventsStore(
             if len(new_state_groups) == 1:
                 # If there is only one state group, then we know what the current
                 # state is.
-                defer.returnValue((state_groups_map[new_state_groups.pop()], None))
+                return (state_groups_map[new_state_groups.pop()], None)

             # Ok, we need to defer to the state handler to resolve our state sets.

@@ -854,7 +854,7 @@ class EventsStore(
                 state_res_store=StateResolutionStore(self),
             )

-            defer.returnValue((res.state, None))
+            return (res.state, None)

     @defer.inlineCallbacks
     def _calculate_state_delta(self, room_id, current_state):
@@ -877,7 +877,7 @@ class EventsStore(
             if ev_id != existing_state.get(key)
         }

-        defer.returnValue((to_delete, to_insert))
+        return (to_delete, to_insert)

     @log_function
     def _persist_events_txn(
@@ -1564,7 +1564,7 @@ class EventsStore(
             return count

         ret = yield self.runInteraction("count_messages", _count_messages)
-        defer.returnValue(ret)
+        return ret

     @defer.inlineCallbacks
     def count_daily_sent_messages(self):
@@ -1585,7 +1585,7 @@ class EventsStore(
             return count

         ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
-        defer.returnValue(ret)
+        return ret

     @defer.inlineCallbacks
     def count_daily_active_rooms(self):
@@ -1600,7 +1600,7 @@ class EventsStore(
             return count

         ret = yield self.runInteraction("count_daily_active_rooms", _count)
-        defer.returnValue(ret)
+        return ret

     def get_current_backfill_token(self):
         """The current minimum token that backfilled events have reached"""
@@ -2183,7 +2183,7 @@ class EventsStore(
         """
        to_1, so_1 = yield self._get_event_ordering(event_id1)
         to_2, so_2 = yield self._get_event_ordering(event_id2)
-        defer.returnValue((to_1, so_1) > (to_2, so_2))
+        return (to_1, so_1) > (to_2, so_2)

     @cachedInlineCallbacks(max_entries=5000)
     def _get_event_ordering(self, event_id):
@@ -2197,9 +2197,7 @@ class EventsStore(
         if not res:
             raise SynapseError(404, "Could not find event %s" % (event_id,))

-        defer.returnValue(
-            (int(res["topological_ordering"]), int(res["stream_ordering"]))
-        )
+        return (int(res["topological_ordering"]), int(res["stream_ordering"]))

     def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
         def get_all_updated_current_state_deltas_txn(txn):
diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py
index 1ce21d190c..6587f31e2b 100644
--- a/synapse/storage/events_bg_updates.py
+++ b/synapse/storage/events_bg_updates.py
@@ -135,7 +135,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         if not result:
             yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME)

-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def _background_reindex_origin_server_ts(self, progress, batch_size):
@@ -212,7 +212,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         if not result:
             yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME)

-        defer.returnValue(result)
+        return result

     @defer.inlineCallbacks
     def _cleanup_extremities_bg_update(self, progress, batch_size):
@@ -396,4 +396,4 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 "_cleanup_extremities_bg_update_drop_table", _drop_table_txn
             )

-        defer.returnValue(num_handled)
+        return num_handled
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 858fc755a1..44441957db 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -157,7 +157,7 @@ class EventsWorkerStore(SQLBaseStore):
         if event is None and not allow_none:
             raise NotFoundError("Could not find event %s" % (event_id,))

-        defer.returnValue(event)
+        return event

     @defer.inlineCallbacks
     def get_events(
@@ -187,7 +187,7 @@ class EventsWorkerStore(SQLBaseStore):
             allow_rejected=allow_rejected,
         )

-        defer.returnValue({e.event_id: e for e in events})
+        return {e.event_id: e for e in events}

     @defer.inlineCallbacks
     def get_events_as_list(
@@ -217,7 +217,7 @@ class EventsWorkerStore(SQLBaseStore):
         """

         if not event_ids:
-            defer.returnValue([])
+            return []

         # there may be duplicates so we cast the list to a set
         event_entry_map = yield self._get_events_from_cache_or_db(
@@ -305,7 +305,7 @@ class EventsWorkerStore(SQLBaseStore):
                     event.unsigned["prev_content"] = prev.content
                     event.unsigned["prev_sender"] = prev.sender

-        defer.returnValue(events)
+        return events

     @defer.inlineCallbacks
     def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
@@ -452,7 +452,7 @@ class EventsWorkerStore(SQLBaseStore):
         without having to create a new transaction for each request for events.
         """
         if not events:
-            defer.returnValue({})
+            return {}

         events_d = defer.Deferred()
         with self._event_fetch_lock:
@@ -496,7 +496,7 @@ class EventsWorkerStore(SQLBaseStore):
             )
         )

-        defer.returnValue({e.event.event_id: e for e in res if e})
+        return {e.event.event_id: e for e in res if e}

     def _fetch_event_rows(self, txn, event_ids):
         """Fetch event rows from the database
@@ -609,7 +609,7 @@ class EventsWorkerStore(SQLBaseStore):

         self._get_event_cache.prefill((original_ev.event_id,), cache_entry)

-        defer.returnValue(cache_entry)
+        return cache_entry

     @defer.inlineCallbacks
     def _maybe_redact_event_row(self, original_ev, redactions):
@@ -679,7 +679,7 @@ class EventsWorkerStore(SQLBaseStore):
             desc="have_events_in_timeline",
         )

-        defer.returnValue(set(r["event_id"] for r in rows))
+        return set(r["event_id"] for r in rows)

     @defer.inlineCallbacks
     def have_seen_events(self, event_ids):
@@ -705,7 +705,7 @@ class EventsWorkerStore(SQLBaseStore):
         input_iterator = iter(event_ids)
         for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)), []):
             yield self.runInteraction("have_seen_events", have_seen_events_txn, chunk)
-        defer.returnValue(results)
+        return results

     def get_seen_events_with_rejections(self, event_ids):
         """Given a list of event ids, check if we rejected them.
@@ -816,4 +816,4 @@ class EventsWorkerStore(SQLBaseStore):
         # it.
         complexity_v1 = round(state_events / 500, 2)

-        defer.returnValue({"v1": complexity_v1})
+        return {"v1": complexity_v1}
diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py
index b195dc66a0..23b48f6cea 100644
--- a/synapse/storage/filtering.py
+++ b/synapse/storage/filtering.py
@@ -15,8 +15,6 @@

 from canonicaljson import encode_canonical_json

-from twisted.internet import defer
-
 from synapse.api.errors import Codes, SynapseError
 from synapse.util.caches.descriptors import cachedInlineCallbacks

@@ -41,7 +39,7 @@ class FilteringStore(SQLBaseStore):
             desc="get_user_filter",
         )

-        defer.returnValue(db_to_json(def_json))
+        return db_to_json(def_json)

     def add_user_filter(self, user_localpart, user_filter):
         def_json = encode_canonical_json(user_filter)
diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py
index 73e6fc6de2..15b01c6958 100644
--- a/synapse/storage/group_server.py
+++ b/synapse/storage/group_server.py
@@ -307,15 +307,13 @@ class GroupServerStore(SQLBaseStore):
             desc="get_group_categories",
         )

-        defer.returnValue(
-            {
-                row["category_id"]: {
-                    "is_public": row["is_public"],
-                    "profile": json.loads(row["profile"]),
-                }
-                for row in rows
+        return {
+            row["category_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
             }
-        )
+            for row in rows
+        }

     @defer.inlineCallbacks
     def get_group_category(self, group_id, category_id):
@@ -328,7 +326,7 @@ class GroupServerStore(SQLBaseStore):

         category["profile"] = json.loads(category["profile"])

-        defer.returnValue(category)
+        return category

     def upsert_group_category(self, group_id, category_id, profile, is_public):
         """Add/update room category for group
@@ -370,15 +368,13 @@ class GroupServerStore(SQLBaseStore):
             desc="get_group_roles",
         )

-        defer.returnValue(
-            {
-                row["role_id"]: {
-                    "is_public": row["is_public"],
-                    "profile": json.loads(row["profile"]),
-                }
-                for row in rows
+        return {
+            row["role_id"]: {
+                "is_public": row["is_public"],
+                "profile": json.loads(row["profile"]),
             }
-        )
+            for row in rows
+        }

     @defer.inlineCallbacks
     def get_group_role(self, group_id, role_id):
@@ -391,7 +387,7 @@ class GroupServerStore(SQLBaseStore):

         role["profile"] = json.loads(role["profile"])

-        defer.returnValue(role)
+        return role

     def upsert_group_role(self, group_id, role_id, profile, is_public):
         """Add/remove user role
@@ -960,7 +956,7 @@ class GroupServerStore(SQLBaseStore):
             _register_user_group_membership_txn,
             next_id,
         )
-        defer.returnValue(res)
+        return res

     @defer.inlineCallbacks
     def create_group(
@@ -1057,9 +1053,9 @@ class GroupServerStore(SQLBaseStore):

         now = int(self._clock.time_msec())
         if row and now < row["valid_until_ms"]:
-            defer.returnValue(json.loads(row["attestation_json"]))
+            return json.loads(row["attestation_json"])

-        defer.returnValue(None)
+        return None

     def get_joined_groups(self, user_id):
         return self._simple_select_onecol(
diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py
index 081564360f..752e9788a2 100644
--- a/synapse/storage/monthly_active_users.py
+++ b/synapse/storage/monthly_active_users.py
@@ -173,7 +173,7 @@ class MonthlyActiveUsersStore(SQLBaseStore):
             )
             if user_id:
                 count = count + 1
-        defer.returnValue(count)
+        return count

     @defer.inlineCallbacks
     def upsert_monthly_active_user(self, user_id):
diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py
index 42ec8c6bb8..1a0f2d5768 100644
--- a/synapse/storage/presence.py
+++ b/synapse/storage/presence.py
@@ -90,9 +90,7 @@ class PresenceStore(SQLBaseStore):
presence_states, ) - defer.returnValue( - (stream_orderings[-1], self._presence_id_gen.get_current_token()) - ) + return (stream_orderings[-1], self._presence_id_gen.get_current_token()) def _update_presence_txn(self, txn, stream_orderings, presence_states): for stream_id, state in zip(stream_orderings, presence_states): @@ -180,7 +178,7 @@ class PresenceStore(SQLBaseStore): for row in rows: row["currently_active"] = bool(row["currently_active"]) - defer.returnValue({row["user_id"]: UserPresenceState(**row) for row in rows}) + return {row["user_id"]: UserPresenceState(**row) for row in rows} def get_current_presence_token(self): return self._presence_id_gen.get_current_token() diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 0ff392bdb4..8a5d8e9b18 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -34,15 +34,13 @@ class ProfileWorkerStore(SQLBaseStore): except StoreError as e: if e.code == 404: # no match - defer.returnValue(ProfileInfo(None, None)) + return ProfileInfo(None, None) return else: raise - defer.returnValue( - ProfileInfo( - avatar_url=profile["avatar_url"], display_name=profile["displayname"] - ) + return ProfileInfo( + avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) def get_profile_displayname(self, user_localpart): @@ -168,7 +166,7 @@ class ProfileStore(ProfileWorkerStore): ) if res: - defer.returnValue(True) + return True res = yield self._simple_select_one_onecol( table="group_invites", @@ -179,4 +177,4 @@ class ProfileStore(ProfileWorkerStore): ) if res: - defer.returnValue(True) + return True diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 98cec8c82b..a6517c4cf3 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -120,7 +120,7 @@ class PushRulesWorkerStore( rules = _load_rules(rows, enabled_map) - defer.returnValue(rules) + return rules @cachedInlineCallbacks(max_entries=5000) def get_push_rules_enabled_for_user(self, user_id): @@ -130,9 +130,7 @@ class PushRulesWorkerStore( retcols=("user_name", "rule_id", "enabled"), desc="get_push_rules_enabled_for_user", ) - defer.returnValue( - {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} - ) + return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} def have_push_rules_changed_for_user(self, user_id, last_id): if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id): @@ -160,7 +158,7 @@ class PushRulesWorkerStore( ) def bulk_get_push_rules(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: [] for user_id in user_ids} @@ -182,7 +180,7 @@ class PushRulesWorkerStore( for user_id, rules in results.items(): results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) - defer.returnValue(results) + return results @defer.inlineCallbacks def move_push_rule_from_room_to_room(self, new_room_id, user_id, rule): @@ -253,7 +251,7 @@ class PushRulesWorkerStore( result = yield self._bulk_get_push_rules_for_room( event.room_id, state_group, current_state_ids, event=event ) - defer.returnValue(result) + return result @cachedInlineCallbacks(num_args=2, cache_context=True) def _bulk_get_push_rules_for_room( @@ -312,7 +310,7 @@ class PushRulesWorkerStore( rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} - defer.returnValue(rules_by_user) + return rules_by_user @cachedList( cached_method_name="get_push_rules_enabled_for_user", @@ -322,7 +320,7 @@ class PushRulesWorkerStore( ) def 
bulk_get_push_rules_enabled(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: {} for user_id in user_ids} @@ -336,7 +334,7 @@ class PushRulesWorkerStore( for row in rows: enabled = bool(row["enabled"]) results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled - defer.returnValue(results) + return results class PushRuleStore(PushRulesWorkerStore): diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index cfe0a94330..be3d4d9ded 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -63,7 +63,7 @@ class PusherWorkerStore(SQLBaseStore): ret = yield self._simple_select_one_onecol( "pushers", {"user_name": user_id}, "id", allow_none=True ) - defer.returnValue(ret is not None) + return ret is not None def get_pushers_by_app_id_and_pushkey(self, app_id, pushkey): return self.get_pushers_by({"app_id": app_id, "pushkey": pushkey}) @@ -95,7 +95,7 @@ class PusherWorkerStore(SQLBaseStore): ], desc="get_pushers_by", ) - defer.returnValue(self._decode_pushers_rows(ret)) + return self._decode_pushers_rows(ret) @defer.inlineCallbacks def get_all_pushers(self): @@ -106,7 +106,7 @@ class PusherWorkerStore(SQLBaseStore): return self._decode_pushers_rows(rows) rows = yield self.runInteraction("get_all_pushers", get_pushers) - defer.returnValue(rows) + return rows def get_all_updated_pushers(self, last_id, current_id, limit): if last_id == current_id: @@ -205,7 +205,7 @@ class PusherWorkerStore(SQLBaseStore): result = {user_id: False for user_id in user_ids} result.update({r["user_name"]: True for r in rows}) - defer.returnValue(result) + return result class PusherStore(PusherWorkerStore): @@ -343,7 +343,7 @@ class PusherStore(PusherWorkerStore): "throttle_ms": row["throttle_ms"], } - defer.returnValue(params_by_room) + return params_by_room @defer.inlineCallbacks def set_throttle_params(self, pusher_id, room_id, params): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b477da12b1..6aa6d98ebb 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -58,7 +58,7 @@ class ReceiptsWorkerStore(SQLBaseStore): @cachedInlineCallbacks() def get_users_with_read_receipts_in_room(self, room_id): receipts = yield self.get_receipts_for_room(room_id, "m.read") - defer.returnValue(set(r["user_id"] for r in receipts)) + return set(r["user_id"] for r in receipts) @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -92,7 +92,7 @@ class ReceiptsWorkerStore(SQLBaseStore): desc="get_receipts_for_user", ) - defer.returnValue({row["room_id"]: row["event_id"] for row in rows}) + return {row["room_id"]: row["event_id"] for row in rows} @defer.inlineCallbacks def get_receipts_for_user_with_orderings(self, user_id, receipt_type): @@ -110,16 +110,14 @@ class ReceiptsWorkerStore(SQLBaseStore): return txn.fetchall() rows = yield self.runInteraction("get_receipts_for_user_with_orderings", f) - defer.returnValue( - { - row[0]: { - "event_id": row[1], - "topological_ordering": row[2], - "stream_ordering": row[3], - } - for row in rows + return { + row[0]: { + "event_id": row[1], + "topological_ordering": row[2], + "stream_ordering": row[3], } - ) + for row in rows + } @defer.inlineCallbacks def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): @@ -147,7 +145,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_ids, to_key, from_key=from_key ) - defer.returnValue([ev for res in results.values() for ev in res]) + return [ev for res in results.values() for ev in res] 
def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None): """Get receipts for a single room for sending to clients. @@ -197,7 +195,7 @@ class ReceiptsWorkerStore(SQLBaseStore): rows = yield self.runInteraction("get_linearized_receipts_for_room", f) if not rows: - defer.returnValue([]) + return [] content = {} for row in rows: @@ -205,9 +203,7 @@ class ReceiptsWorkerStore(SQLBaseStore): row["user_id"] ] = json.loads(row["data"]) - defer.returnValue( - [{"type": "m.receipt", "room_id": room_id, "content": content}] - ) + return [{"type": "m.receipt", "room_id": room_id, "content": content}] @cachedList( cached_method_name="_get_linearized_receipts_for_room", @@ -217,7 +213,7 @@ class ReceiptsWorkerStore(SQLBaseStore): ) def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): if not room_ids: - defer.returnValue({}) + return {} def f(txn): if from_key: @@ -264,7 +260,7 @@ class ReceiptsWorkerStore(SQLBaseStore): room_id: [results[room_id]] if room_id in results else [] for room_id in room_ids } - defer.returnValue(results) + return results def get_all_updated_receipts(self, last_id, current_id, limit=None): if last_id == current_id: @@ -468,7 +464,7 @@ class ReceiptsStore(ReceiptsWorkerStore): ) if event_ts is None: - defer.returnValue(None) + return None now = self._clock.time_msec() logger.debug( @@ -482,7 +478,7 @@ class ReceiptsStore(ReceiptsWorkerStore): max_persisted_id = self._receipts_id_gen.get_current_token() - defer.returnValue((stream_id, max_persisted_id)) + return (stream_id, max_persisted_id) def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 8b2c2a97ab..999c10a308 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -75,12 +75,12 @@ class RegistrationWorkerStore(SQLBaseStore): info = yield self.get_user_by_id(user_id) if not info: - defer.returnValue(False) + return False now = self.clock.time_msec() trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms - defer.returnValue(is_trial) + return is_trial @cached() def get_user_by_access_token(self, token): @@ -115,7 +115,7 @@ class RegistrationWorkerStore(SQLBaseStore): allow_none=True, desc="get_expiration_ts_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_account_validity_for_user( @@ -190,7 +190,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="get_user_from_renewal_token", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_renewal_token_for_user(self, user_id): @@ -209,7 +209,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="get_renewal_token_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_expiring_soon(self): @@ -237,7 +237,7 @@ class RegistrationWorkerStore(SQLBaseStore): self.config.account_validity.renew_at, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_renewal_mail_status(self, user_id, email_sent): @@ -280,7 +280,7 @@ class RegistrationWorkerStore(SQLBaseStore): desc="is_server_admin", ) - defer.returnValue(res if res else False) + return res if res else False def _query_for_auth(self, txn, token): sql = ( @@ -311,7 +311,7 @@ class RegistrationWorkerStore(SQLBaseStore): res = yield self.runInteraction( "is_support_user", self.is_support_user_txn, user_id ) - defer.returnValue(res) + return res def 
is_support_user_txn(self, txn, user_id): res = self._simple_select_one_onecol_txn( @@ -349,7 +349,7 @@ class RegistrationWorkerStore(SQLBaseStore): return 0 ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret def count_daily_user_type(self): """ @@ -395,7 +395,7 @@ class RegistrationWorkerStore(SQLBaseStore): return count ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def find_next_generated_user_id_localpart(self): @@ -425,7 +425,7 @@ class RegistrationWorkerStore(SQLBaseStore): if i not in found: return i - defer.returnValue( + return ( ( yield self.runInteraction( "find_next_generated_user_id", _find_next_generated_user_id @@ -447,7 +447,7 @@ class RegistrationWorkerStore(SQLBaseStore): user_id = yield self.runInteraction( "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address ) - defer.returnValue(user_id) + return user_id def get_user_id_by_threepid_txn(self, txn, medium, address): """Returns user id from threepid @@ -487,7 +487,7 @@ class RegistrationWorkerStore(SQLBaseStore): ["medium", "address", "validated_at", "added_at"], "user_get_threepids", ) - defer.returnValue(ret) + return ret def user_delete_threepid(self, user_id, medium, address): return self._simple_delete( @@ -677,7 +677,7 @@ class RegistrationStore( if end: yield self._end_background_update("users_set_deactivated_flag") - defer.returnValue(batch_size) + return batch_size @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms): @@ -957,7 +957,7 @@ class RegistrationStore( desc="is_guest", ) - defer.returnValue(res if res else False) + return res if res else False def add_user_pending_deactivation(self, user_id): """ @@ -1024,7 +1024,7 @@ class RegistrationStore( yield self._end_background_update("user_threepids_grandfather") - defer.returnValue(1) + return 1 def get_threepid_validation_session( self, medium, client_secret, address=None, sid=None, validated=True @@ -1337,4 +1337,4 @@ class RegistrationStore( ) # Convert the integer into a boolean. 
- defer.returnValue(res == 1) + return res == 1 diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 9954bc094f..fcb5f2f23a 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -17,8 +17,6 @@ import logging import attr -from twisted.internet import defer - from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore @@ -363,7 +361,7 @@ class RelationsWorkerStore(SQLBaseStore): return edit_event = yield self.get_event(edit_id, allow_none=True) - defer.returnValue(edit_event) + return edit_event def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender): """Check if a user has already annotated an event with the same key diff --git a/synapse/storage/room.py b/synapse/storage/room.py index fe9d79d792..bc606292b8 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -193,14 +193,12 @@ class RoomWorkerStore(SQLBaseStore): ) if row: - defer.returnValue( - RatelimitOverride( - messages_per_second=row["messages_per_second"], - burst_count=row["burst_count"], - ) + return RatelimitOverride( + messages_per_second=row["messages_per_second"], + burst_count=row["burst_count"], ) else: - defer.returnValue(None) + return None class RoomStore(RoomWorkerStore, SearchStore): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b3c002b9eb..cb88e49b51 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -108,7 +108,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): room_id, on_invalidate=cache_context.invalidate ) hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids) - defer.returnValue(hosts) + return hosts @cached(max_entries=100000, iterable=True) def get_users_in_room(self, room_id): @@ -253,8 +253,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): invites = yield self.get_invited_rooms_for_user(user_id) for invite in invites: if invite.room_id == room_id: - defer.returnValue(invite) - defer.returnValue(None) + return invite + return None def get_rooms_for_user_where_membership_is(self, user_id, membership_list): """ Get all the rooms for this user where the membership for this user @@ -347,11 +347,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): rooms = yield self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN] ) - defer.returnValue( - frozenset( - GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) - for r in rooms - ) + return frozenset( + GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) + for r in rooms ) @defer.inlineCallbacks @@ -361,7 +359,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): rooms = yield self.get_rooms_for_user_with_stream_ordering( user_id, on_invalidate=on_invalidate ) - defer.returnValue(frozenset(r.room_id for r in rooms)) + return frozenset(r.room_id for r in rooms) @cachedInlineCallbacks(max_entries=500000, cache_context=True, iterable=True) def get_users_who_share_room_with_user(self, user_id, cache_context): @@ -378,7 +376,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): ) user_who_share_room.update(user_ids) - defer.returnValue(user_who_share_room) + return user_who_share_room @defer.inlineCallbacks def get_joined_users_from_context(self, event, context): @@ -394,7 +392,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): result = yield self._get_joined_users_from_context( event.room_id, state_group, current_state_ids, event=event, context=context ) - 
defer.returnValue(result) + return result def get_joined_users_from_state(self, room_id, state_entry): state_group = state_entry.state_group @@ -508,7 +506,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): avatar_url=to_ascii(event.content.get("avatar_url", None)), ) - defer.returnValue(users_in_room) + return users_in_room @cachedInlineCallbacks(max_entries=10000) def is_host_joined(self, room_id, host): @@ -533,14 +531,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): rows = yield self._execute("is_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True @cachedInlineCallbacks() def was_host_joined(self, room_id, host): @@ -573,14 +571,14 @@ class RoomMemberWorkerStore(EventsWorkerStore): rows = yield self._execute("was_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True def get_joined_hosts(self, room_id, state_entry): state_group = state_entry.state_group @@ -607,7 +605,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): cache = self._get_joined_hosts_cache(room_id) joined_hosts = yield cache.get_destinations(state_entry) - defer.returnValue(joined_hosts) + return joined_hosts @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id): @@ -637,7 +635,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): return rows[0][0] count = yield self.runInteraction("did_forget_membership", f) - defer.returnValue(count == 0) + return count == 0 @defer.inlineCallbacks def get_rooms_user_has_been_in(self, user_id): @@ -847,7 +845,7 @@ class RoomMemberStore(RoomMemberWorkerStore): if not result: yield self._end_background_update(_MEMBERSHIP_PROFILE_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_current_state_membership(self, progress, batch_size): @@ -905,7 +903,7 @@ class RoomMemberStore(RoomMemberWorkerStore): if finished: yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) - defer.returnValue(row_count) + return row_count class _JoinedHostsCache(object): @@ -933,7 +931,7 @@ class _JoinedHostsCache(object): state_entry(synapse.state._StateCacheEntry) """ if state_entry.state_group == self.state_group: - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) with (yield self.linearizer.queue(())): if state_entry.state_group == self.state_group: @@ -970,7 +968,7 @@ class _JoinedHostsCache(object): else: self.state_group = object() self._len = sum(len(v) for v in itervalues(self.hosts_to_joined_users)) - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) def __len__(self): return self._len diff --git a/synapse/storage/search.py b/synapse/storage/search.py index f3b1cec933..df87ab6a6d 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -166,7 +166,7 @@ class SearchStore(BackgroundUpdateStore): if not result: yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_reindex_gin_search(self, progress, batch_size): 
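The _background_reindex_* hunks here, like the populate_stats and user_directory ones later in this patch, all follow the same background-update contract: the handler receives a progress dict and a batch_size, returns roughly how many items it processed, and calls _end_background_update once nothing remains. A schematic sketch of that contract, assuming a hypothetical fetch_and_process helper in place of the real runInteraction bodies:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def run_background_update(store, progress, batch_size, fetch_and_process):
        # Schematic sketch only: fetch_and_process stands in for the
        # runInteraction bodies of the real handlers.
        last_id = progress.get("last_id", 0)
        num_handled = yield fetch_and_process(last_id, batch_size)
        if not num_handled:
            # Nothing left to do: mark the update finished so the
            # updater stops rescheduling it.
            yield store._end_background_update("example_update")
        # Report how much work this batch did; the updater uses this
        # figure to pace subsequent batches.
        return num_handled
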
@@ -209,7 +209,7 @@ class SearchStore(BackgroundUpdateStore): yield self.runWithConnection(create_index) yield self._end_background_update(self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME) - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _background_reindex_search_order(self, progress, batch_size): @@ -287,7 +287,7 @@ class SearchStore(BackgroundUpdateStore): if not finished: yield self._end_background_update(self.EVENT_SEARCH_ORDER_UPDATE_NAME) - defer.returnValue(num_rows) + return num_rows def store_event_search_txn(self, txn, event, key, value): """Add event to the search table @@ -454,17 +454,15 @@ class SearchStore(BackgroundUpdateStore): count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - {"event": event_map[r["event_id"]], "rank": r["rank"]} - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + {"event": event_map[r["event_id"]], "rank": r["rank"]} + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } @defer.inlineCallbacks def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None): @@ -599,22 +597,20 @@ class SearchStore(BackgroundUpdateStore): count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - { - "event": event_map[r["event_id"]], - "rank": r["rank"], - "pagination_token": "%s,%s" - % (r["origin_server_ts"], r["stream_ordering"]), - } - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + { + "event": event_map[r["event_id"]], + "rank": r["rank"], + "pagination_token": "%s,%s" + % (r["origin_server_ts"], r["stream_ordering"]), + } + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } def _find_highlights_in_postgres(self, search_query, events): """Given a list of events and a search term, return a list of words diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 6bd81e84ad..fb83218f90 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -59,7 +59,7 @@ class SignatureWorkerStore(SQLBaseStore): for e_id, h in hashes.items() } - defer.returnValue(list(hashes.items())) + return list(hashes.items()) def _get_event_reference_hashes_txn(self, txn, event_id): """Get all the hashes for a given PDU. 
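Every storage hunk in this patch makes the same mechanical substitution: under @defer.inlineCallbacks, defer.returnValue(x) becomes a plain return x. Python 3 generators may return a value (the return statement raises StopIteration(value)), and inlineCallbacks unwraps that as the Deferred's result, so the Python 2 era returnValue helper is no longer needed. The two idioms side by side, with a hypothetical count_events method used only for illustration:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_event_count(store, room_id):
        count = yield store.count_events(room_id)  # hypothetical method
        # Python 2 generators could not return a value, so Twisted shipped
        # defer.returnValue(count), which raised a control-flow exception.
        # On Python 3, a bare return sets the Deferred's result directly:
        return count
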
diff --git a/synapse/storage/state.py b/synapse/storage/state.py index a35289876d..1980a87108 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -422,7 +422,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Retrieve the room's create event create_event = yield self.get_create_event_for_room(room_id) - defer.returnValue(create_event.content.get("room_version", "1")) + return create_event.content.get("room_version", "1") @defer.inlineCallbacks def get_room_predecessor(self, room_id): @@ -442,7 +442,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): create_event = yield self.get_create_event_for_room(room_id) # Return predecessor if present - defer.returnValue(create_event.content.get("predecessor", None)) + return create_event.content.get("predecessor", None) @defer.inlineCallbacks def get_create_event_for_room(self, room_id): @@ -466,7 +466,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # Retrieve the room's create event and return create_event = yield self.get_event(create_id) - defer.returnValue(create_event) + return create_event @cached(max_entries=100000, iterable=True) def get_current_state_ids(self, room_id): @@ -563,7 +563,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): if not event: return - defer.returnValue(event.content.get("canonical_alias")) + return event.content.get("canonical_alias") @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): @@ -613,14 +613,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): dict of state_group_id -> (dict of (type, state_key) -> event id) """ if not event_ids: - defer.returnValue({}) + return {} event_to_groups = yield self._get_state_group_for_events(event_ids) groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups) - defer.returnValue(group_to_state) + return group_to_state @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): @@ -634,7 +634,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ group_to_state = yield self._get_state_for_groups((state_group,)) - defer.returnValue(group_to_state[state_group]) + return group_to_state[state_group] @defer.inlineCallbacks def get_state_groups(self, room_id, event_ids): @@ -645,7 +645,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): dict of state_group_id -> list of state events. 
""" if not event_ids: - defer.returnValue({}) + return {} group_to_ids = yield self.get_state_groups_ids(room_id, event_ids) @@ -658,16 +658,14 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): get_prev_content=False, ) - defer.returnValue( - { - group: [ - state_event_map[v] - for v in itervalues(event_id_map) - if v in state_event_map - ] - for group, event_id_map in iteritems(group_to_ids) - } - ) + return { + group: [ + state_event_map[v] + for v in itervalues(event_id_map) + if v in state_event_map + ] + for group, event_id_map in iteritems(group_to_ids) + } @defer.inlineCallbacks def _get_state_groups_from_groups(self, groups, state_filter): @@ -694,7 +692,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): ) results.update(res) - defer.returnValue(results) + return results def _get_state_groups_from_groups_txn( self, txn, groups, state_filter=StateFilter.all() @@ -829,7 +827,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): @@ -855,7 +853,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_for_event(self, event_id, state_filter=StateFilter.all()): @@ -871,7 +869,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @defer.inlineCallbacks def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): @@ -887,7 +885,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_ids_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @cached(max_entries=50000) def _get_state_group_for_event(self, event_id): @@ -917,7 +915,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): desc="_get_state_group_for_events", ) - defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) + return {row["event_id"]: row["state_group"] for row in rows} def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` @@ -997,7 +995,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: - defer.returnValue(state) + return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence @@ -1024,7 +1022,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): # everything we need from the database anyway. 
state[group] = state_filter.filter_state(group_state_dict) - defer.returnValue(state) + return state def _get_state_for_groups_using_cache(self, groups, cache, state_filter): """Gets the state at each of a list of state groups, optionally @@ -1498,7 +1496,7 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME ) - defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR) + return result * BATCH_SIZE_SCALE_FACTOR @defer.inlineCallbacks def _background_index_state(self, progress, batch_size): @@ -1528,4 +1526,4 @@ class StateStore(StateGroupWorkerStore, BackgroundUpdateStore): yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1cec84ee2e..e893b05ee7 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -66,7 +66,7 @@ class StatsStore(StateDeltasStore): if not self.stats_enabled: yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 # Get all the rooms that we want to process. def _make_staging_area(txn): @@ -120,7 +120,7 @@ class StatsStore(StateDeltasStore): self.get_earliest_token_for_room_stats.invalidate_all() yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_cleanup(self, progress, batch_size): @@ -129,7 +129,7 @@ class StatsStore(StateDeltasStore): """ if not self.stats_enabled: yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 position = yield self._simple_select_one_onecol( TEMP_TABLE + "_position", None, "position" @@ -143,14 +143,14 @@ class StatsStore(StateDeltasStore): yield self.runInteraction("populate_stats_cleanup", _delete_staging_area) yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_process_rooms(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 # If we don't have progress filed, delete everything. if not progress: @@ -186,7 +186,7 @@ class StatsStore(StateDeltasStore): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining", @@ -303,9 +303,9 @@ class StatsStore(StateDeltasStore): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. 
- defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count def delete_all_stats(self): """ diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index a0465484df..856c2ee8d8 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -300,7 +300,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not room_ids: - defer.returnValue({}) + return {} results = {} room_ids = list(room_ids) @@ -323,7 +323,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) results.update(dict(zip(rm_ids, res))) - defer.returnValue(results) + return results def get_rooms_that_changed(self, room_ids, from_key): """Given a list of rooms and a token, return rooms where there may have @@ -364,7 +364,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): the chunk of events returned. """ if from_key == to_key: - defer.returnValue(([], from_key)) + return ([], from_key) from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream @@ -374,7 +374,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) if not has_changed: - defer.returnValue(([], from_key)) + return ([], from_key) def f(txn): sql = ( @@ -407,7 +407,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # get. key = from_key - defer.returnValue((ret, key)) + return (ret, key) @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): @@ -415,14 +415,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: - defer.returnValue([]) + return [] if from_id: has_changed = self._membership_stream_cache.has_entity_changed( user_id, int(from_id) ) if not has_changed: - defer.returnValue([]) + return [] def f(txn): sql = ( @@ -447,7 +447,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(ret, rows, topo_order=False) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_recent_events_for_room(self, room_id, limit, end_token): @@ -477,7 +477,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token): @@ -496,7 +496,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ # Allow a zero limit here, and no-op. if limit == 0: - defer.returnValue(([], end_token)) + return ([], end_token) end_token = RoomStreamToken.parse(end_token) @@ -511,7 +511,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # We want to return the results in ascending order. 
rows.reverse() - defer.returnValue((rows, token)) + return (rows, token) def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -549,12 +549,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ token = yield self.get_room_max_stream_ordering() if room_id is None: - defer.returnValue("s%d" % (token,)) + return "s%d" % (token,) else: topo = yield self.runInteraction( "_get_max_topological_txn", self._get_max_topological_txn, room_id ) - defer.returnValue("t%d-%d" % (topo, token)) + return "t%d-%d" % (topo, token) def get_stream_token_for_event(self, event_id): """The stream token for an event @@ -674,14 +674,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [e for e in results["after"]["event_ids"]], get_prev_content=True ) - defer.returnValue( - { - "events_before": events_before, - "events_after": events_after, - "start": results["before"]["token"], - "end": results["after"]["token"], - } - ) + return { + "events_before": events_before, + "events_after": events_after, + "start": results["before"]["token"], + "end": results["after"]["token"], + } def _get_events_around_txn( self, txn, room_id, event_id, before_limit, after_limit, event_filter @@ -785,7 +783,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): events = yield self.get_events_as_list(event_ids) - defer.returnValue((upper_bound, events)) + return (upper_bound, events) def get_federation_out_pos(self, typ): return self._simple_select_one_onecol( @@ -939,7 +937,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) class StreamStore(StreamWorkerStore): diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index e88f8ea35f..20dd6bd53d 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -66,7 +66,7 @@ class TagsWorkerStore(AccountDataWorkerStore): room_id string, tag string and content string. """ if last_id == current_id: - defer.returnValue([]) + return [] def get_all_updated_tags_txn(txn): sql = ( @@ -107,7 +107,7 @@ class TagsWorkerStore(AccountDataWorkerStore): ) results.extend(tags) - defer.returnValue(results) + return results @defer.inlineCallbacks def get_updated_tags(self, user_id, stream_id): @@ -135,7 +135,7 @@ class TagsWorkerStore(AccountDataWorkerStore): user_id, int(stream_id) ) if not changed: - defer.returnValue({}) + return {} room_ids = yield self.runInteraction("get_updated_tags", get_updated_tags_txn) @@ -145,7 +145,7 @@ class TagsWorkerStore(AccountDataWorkerStore): for room_id in room_ids: results[room_id] = tags_by_room.get(room_id, {}) - defer.returnValue(results) + return results def get_tags_for_room(self, user_id, room_id): """Get all the tags for the given room @@ -194,7 +194,7 @@ class TagsStore(TagsWorkerStore): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result @defer.inlineCallbacks def remove_tag_from_room(self, user_id, room_id, tag): @@ -217,7 +217,7 @@ class TagsStore(TagsWorkerStore): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result def _update_revision_txn(self, txn, user_id, room_id, next_id): """Update the latest revision of the tags for the given user and room. 
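The transactions.py hunks just below read _destination_retry_cache through a SENTINEL object, which lets a cached None be told apart from a missing entry. A minimal sketch of that sentinel pattern, with hypothetical names for everything except the idea itself:

    SENTINEL = object()  # unique marker: "no entry", distinct from a cached None

    def get_retry_timings_cached(cache, destination, load_from_db):
        # load_from_db is a hypothetical loader standing in for the
        # runInteraction call in the real code.
        result = cache.get(destination, SENTINEL)
        if result is not SENTINEL:
            return result
        result = load_from_db(destination)
        # Races between this write and invalidation are tolerated; as the
        # comment in the hunk notes, entries time out quickly anyway.
        cache[destination] = result
        return result
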
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index c585cf6cf7..b3c3bf55bc 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -147,7 +147,7 @@ class TransactionStore(SQLBaseStore): result = self._destination_retry_cache.get(destination, SENTINEL) if result is not SENTINEL: - defer.returnValue(result) + return result result = yield self.runInteraction( "get_destination_retry_timings", @@ -158,7 +158,7 @@ class TransactionStore(SQLBaseStore): # We don't hugely care about race conditions between getting and # invalidating the cache, since we time out fairly quickly anyway. self._destination_retry_cache[destination] = result - defer.returnValue(result) + return result def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 7fd16fe65e..b5188d9bee 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -109,7 +109,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos}) yield self._end_background_update("populate_user_directory_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_cleanup(self, progress, batch_size): @@ -131,7 +131,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): ) yield self._end_background_update("populate_user_directory_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_process_rooms(self, progress, batch_size): @@ -177,7 +177,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_user_directory_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining" @@ -257,9 +257,9 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. - defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count @defer.inlineCallbacks def _populate_user_directory_process_users(self, progress, batch_size): @@ -268,7 +268,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): """ if not self.hs.config.user_directory_search_all_users: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 def _get_next_batch(txn): sql = "SELECT user_id FROM %s LIMIT %s" % ( @@ -298,7 +298,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): # No more users -- complete the transaction. 
if not users_to_work_on: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d users of %d remaining" @@ -322,7 +322,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): progress, ) - defer.returnValue(len(users_to_work_on)) + return len(users_to_work_on) @defer.inlineCallbacks def is_room_world_readable_or_publicly_joinable(self, room_id): @@ -344,16 +344,16 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): join_rule_ev = yield self.get_event(join_rules_id, allow_none=True) if join_rule_ev: if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC: - defer.returnValue(True) + return True hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, "")) if hist_vis_id: hist_vis_ev = yield self.get_event(hist_vis_id, allow_none=True) if hist_vis_ev: if hist_vis_ev.content.get("history_visibility") == "world_readable": - defer.returnValue(True) + return True - defer.returnValue(False) + return False def update_profile_in_user_dir(self, user_id, display_name, avatar_url): """ @@ -499,7 +499,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): user_ids = set(user_ids_share_pub) user_ids.update(user_ids_share_priv) - defer.returnValue(user_ids) + return user_ids def add_users_who_share_private_room(self, room_id, user_id_tuples): """Insert entries into the users_who_share_private_rooms table. The first @@ -609,7 +609,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): users = set(pub_rows) users.update(rows) - defer.returnValue(list(users)) + return list(users) @defer.inlineCallbacks def get_rooms_in_common_for_users(self, user_id, other_user_id): @@ -635,7 +635,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): "get_rooms_in_common_for_users", None, sql, user_id, other_user_id ) - defer.returnValue([room_id for room_id, in rows]) + return [room_id for room_id, in rows] def delete_all_from_user_dir(self): """Delete the entire user directory @@ -782,7 +782,7 @@ class UserDirectoryStore(StateDeltasStore, BackgroundUpdateStore): limited = len(results) > limit - defer.returnValue({"limited": limited, "results": results}) + return {"limited": limited, "results": results} def _parse_query_sqlite(search_term): diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py index 1815fdc0dd..05cabc2282 100644 --- a/synapse/storage/user_erasure_store.py +++ b/synapse/storage/user_erasure_store.py @@ -12,9 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import operator -from twisted.internet import defer +import operator from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedList @@ -67,7 +66,7 @@ class UserErasureWorkerStore(SQLBaseStore): erased_users = yield self.runInteraction("are_users_erased", _get_erased_users) res = dict((u, u in erased_users) for u in user_ids) - defer.returnValue(res) + return res class UserErasureStore(UserErasureWorkerStore): diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 488c49747a..b91fb2db7b 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -56,7 +56,7 @@ class EventSources(object): device_list_key=device_list_key, groups_key=groups_key, ) - defer.returnValue(token) + return token @defer.inlineCallbacks def get_current_token_for_pagination(self): @@ -80,4 +80,4 @@ class EventSources(object): device_list_key=0, groups_key=0, ) - defer.returnValue(token) + return token diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f506b2a695..841625a991 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -49,7 +49,7 @@ class Clock(object): with context.PreserveLoggingContext(): self._reactor.callLater(seconds, d.callback, seconds) res = yield d - defer.returnValue(res) + return res def time(self): """Returns the current system time in seconds since epoch.""" diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 58a6b8764f..f1c46836b1 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -366,7 +366,7 @@ class ReadWriteLock(object): new_defer.callback(None) self.key_to_current_readers.get(key, set()).discard(new_defer) - defer.returnValue(_ctx_manager()) + return _ctx_manager() @defer.inlineCallbacks def write(self, key): @@ -396,7 +396,7 @@ class ReadWriteLock(object): if self.key_to_current_writer[key] == new_defer: self.key_to_current_writer.pop(key) - defer.returnValue(_ctx_manager()) + return _ctx_manager() def _cancelled_to_timed_out_error(value, timeout): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 675db2f448..a1acacbde9 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -289,7 +289,7 @@ class CacheDescriptor(_CacheDescriptorBase): def foo(self, key, cache_context): r1 = yield self.bar1(key, on_invalidate=cache_context.invalidate) r2 = yield self.bar2(key, on_invalidate=cache_context.invalidate) - defer.returnValue(r1 + r2) + return r1 + r2 Args: num_args (int): number of positional arguments (excluding ``self`` and diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index d6908e169d..82d3eefe0e 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -121,7 +121,7 @@ class ResponseCache(object): @defer.inlineCallbacks def handle_request(request): # etc - defer.returnValue(result) + return result result = yield response_cache.wrap( key, diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c30b6de19c..0910930c21 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -67,7 +67,7 @@ def measure_func(name): def measured_func(self, *args, **kwargs): with Measure(self.clock, name): r = yield func(self, *args, **kwargs) - defer.returnValue(r) + return r return measured_func diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index d8d0ceae51..0862b5ca5a 100644 --- a/synapse/util/retryutils.py +++ 
b/synapse/util/retryutils.py @@ -95,15 +95,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs) # maximum backoff even though it might only have been down briefly backoff_on_failure = not ignore_backoff - defer.returnValue( - RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - backoff_on_failure=backoff_on_failure, - **kwargs - ) + return RetryDestinationLimiter( + destination, + clock, + store, + retry_interval, + backoff_on_failure=backoff_on_failure, + **kwargs ) diff --git a/synapse/visibility.py b/synapse/visibility.py index 2a11c83596..bf0f1eebd8 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -208,7 +208,7 @@ def filter_events_for_client( filtered_events = filter(operator.truth, filtered_events) # we turn it into a list before returning it. - defer.returnValue(list(filtered_events)) + return list(filtered_events) @defer.inlineCallbacks @@ -317,11 +317,11 @@ def filter_events_for_server( elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return # If there are no erased users then we can just return the given list # of events without having to copy it. - defer.returnValue(events) + return events # Ok, so we're dealing with events that have non-trivial visibility # rules, so we need to also get the memberships of the room. @@ -384,4 +384,4 @@ def filter_events_for_server( elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8d94a503d6..c4f0bbd3dd 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -107,7 +107,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertEquals(LoggingContext.current_context().request, "11") with PreserveLoggingContext(): yield persp_deferred - defer.returnValue(persp_resp) + return persp_resp self.http_client.post_json.side_effect = get_perspectives @@ -554,7 +554,7 @@ def run_in_context(f, *args, **kwargs): # logs. 
ctx.request = "testctx" rv = yield f(*args, **kwargs) - defer.returnValue(rv) + return rv def _verify_json_for_server(kr, *args): @@ -565,6 +565,6 @@ def _verify_json_for_server(kr, *args): @defer.inlineCallbacks def v(): rv1 = yield kr.verify_json_for_server(*args) - defer.returnValue(rv1) + return rv1 return run_in_context(v) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 90d0129374..99dce45cfe 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -283,4 +283,4 @@ class RegistrationTestCase(unittest.HomeserverTestCase): user, requester, displayname, by_admin=True ) - defer.returnValue((user_id, token)) + return (user_id, token) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index a49f9b3224..b906686b49 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -145,7 +145,7 @@ class MatrixFederationAgentTests(TestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res except Exception as e: logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e) raise @@ -936,7 +936,7 @@ class MatrixFederationAgentTests(TestCase): except Exception as e: logger.warning("Error fetching well-known: %s", e) raise - defer.returnValue(result) + return result def test_well_known_cache(self): self.reactor.lookups["testserv"] = "1.2.3.4" diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 65b51dc981..3b885ef64b 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -61,7 +61,7 @@ class SrvResolverTestCase(unittest.TestCase): # should have restored our context self.assertIs(LoggingContext.current_context(), ctx) - defer.returnValue(result) + return result test_d = do_lookup() self.assertNoResult(test_d) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index b9d6d7ad1c..2b01f40a42 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -68,7 +68,7 @@ class FederationClientTests(HomeserverTestCase): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res finally: check_logcontext(context) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index a8adc9a61d..a3d7e3c046 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -46,7 +46,7 @@ class HttpTransactionCacheTestCase(unittest.TestCase): @defer.inlineCallbacks def cb(): yield Clock(reactor).sleep(0) - defer.returnValue("yay") + return "yay" @defer.inlineCallbacks def test(): diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index fbb9302694..9fabe3fbc0 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -43,7 +43,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): "test_update", progress, ) - defer.returnValue(count) + return count self.update_handler.side_effect = update @@ -60,7 +60,7 @@ class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def update(progress, count): yield self.store._end_background_update("test_update") - defer.returnValue(count) + return count self.update_handler.side_effect = update self.update_handler.reset_mock() diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 
732a778fab..1cb471205b 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -69,7 +69,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, room, user, body): @@ -92,7 +92,7 @@ class RedactionTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a..c6e8196b91 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -67,7 +67,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_one_member(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 212a7ae765..5c2cf3c2db 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -65,7 +65,7 @@ class StateStoreTestCase(tests.unittest.TestCase): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event def assertStateMapEqual(self, s1, s2): for t in s1: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 118c3bd238..e0605dac2f 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -139,7 +139,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): builder ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_room_member(self, user_id, membership="join", extra_content={}): @@ -161,7 +161,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, user_id, content=None): @@ -182,7 +182,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_large_room(self): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 7807328e2f..56320bbaf9 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -159,7 +159,7 @@ class DescriptorTestCase(unittest.TestCase): def inner_fn(): with PreserveLoggingContext(): yield complete_lookup - defer.returnValue(1) + return 1 return inner_fn() @@ -169,7 +169,7 @@ class DescriptorTestCase(unittest.TestCase): c1.name = "c1" r = yield obj.fn(1) self.assertEqual(LoggingContext.current_context(), c1) - defer.returnValue(r) + return r def check_result(r): self.assertEqual(r, 1) @@ -286,7 +286,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): # we want this to behave like an asynchronous function yield run_on_reactor() assert LoggingContext.current_context().request == "c1" - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) with LoggingContext() as c1: c1.request = "c1" @@ -334,7 +334,7 @@ class CachedListDescriptorTestCase(unittest.TestCase): def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function yield run_on_reactor() - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) obj = Cls() invalidate0 = 
mock.Mock() diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b47..425e3387db 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -361,7 +361,7 @@ def setup_test_homeserver( if fed: register_federation_servlets(hs, fed) - defer.returnValue(hs) + return hs def register_federation_servlets(hs, resource): @@ -465,9 +465,9 @@ class MockHttpResource(HttpServer): args = [urlparse.unquote(u) for u in matcher.groups()] (code, response) = yield func(mock_request, *args) - defer.returnValue((code, response)) + return (code, response) except CodeMessageException as e: - defer.returnValue((e.code, cs_error(e.msg, code=e.errcode))) + return (e.code, cs_error(e.msg, code=e.errcode)) raise KeyError("No event can handle %s" % path) From 65afc535a6a3fd61ea91c99ed2284b6fd0c4e204 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 15:14:21 +0100 Subject: [PATCH 28/72] Update changelog.d/5743.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5743.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5743.bugfix b/changelog.d/5743.bugfix index a160e9945f..65728ff079 100644 --- a/changelog.d/5743.bugfix +++ b/changelog.d/5743.bugfix @@ -1 +1 @@ -Log when we receive receipt from a different origin. +Log when we receive an event receipt from an unexpected origin. From 3641784e8c63e1ac0deaa99519d53c0bf2853993 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Tue, 23 Jul 2019 15:46:04 +0100 Subject: [PATCH 29/72] Make Jaeger fully configurable (#5694) * Allow Jaeger to be configured * Update sample config --- changelog.d/5694.misc | 1 + docs/sample_config.yaml | 16 ++++++++++++++++ synapse/config/tracer.py | 22 ++++++++++++++++++++++ synapse/logging/opentracing.py | 11 +++++++---- 4 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5694.misc diff --git a/changelog.d/5694.misc b/changelog.d/5694.misc new file mode 100644 index 0000000000..3b12dcc849 --- /dev/null +++ b/changelog.d/5694.misc @@ -0,0 +1 @@ +Make Jaeger fully configurable. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0a96197ca6..7edf15207a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1430,3 +1430,19 @@ opentracing: # #homeserver_whitelist: # - ".*" + + # Jaeger can be configured to sample traces at different rates. + # All configuration options provided by Jaeger can be set here. + # Jaeger's configuration mostly related to trace sampling which + # is documented here: + # https://www.jaegertracing.io/docs/1.13/sampling/. + # + #jaeger_config: + # sampler: + # type: const + # param: 1 + + # Logging whether spans were started and reported + # + # logging: + # false diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 4479454415..95e7ccb3a3 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -23,6 +23,12 @@ class TracerConfig(Config): opentracing_config = {} self.opentracer_enabled = opentracing_config.get("enabled", False) + + self.jaeger_config = opentracing_config.get( + "jaeger_config", + {"sampler": {"type": "const", "param": 1}, "logging": False}, + ) + if not self.opentracer_enabled: return @@ -56,4 +62,20 @@ class TracerConfig(Config): # #homeserver_whitelist: # - ".*" + + # Jaeger can be configured to sample traces at different rates. + # All configuration options provided by Jaeger can be set here. 
+ # Jaeger's configuration is mostly related to trace sampling, which + # is documented here: + # https://www.jaegertracing.io/docs/1.13/sampling/. + # + #jaeger_config: + # sampler: + # type: const + # param: 1 + + # Whether to log when spans are started and reported + # + # logging: + # false """ diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fb338ca223..d2c209c471 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -228,13 +228,16 @@ def init_tracer(config): # Include the worker name name = config.worker_name if config.worker_name else "master" + # Pull out the jaeger config if it was given. Otherwise set it to something sensible. + # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py + set_homeserver_whitelist(config.opentracer_whitelist) - jaeger_config = JaegerConfig( - config={"sampler": {"type": "const", "param": 1}, "logging": True}, + + JaegerConfig( + config=config.jaeger_config, service_name="{} {}".format(config.server_name, name), scope_manager=LogContextScopeManager(config), - ) - jaeger_config.initialize_tracer() + ).initialize_tracer() # Set up tags to be opentracing's tags global tags From 73bbaf2bc6962df2c25443cbc70286318601af5a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 16:55:45 +0100 Subject: [PATCH 30/72] Add unit test for current state membership bg update --- tests/storage/test_roommember.py | 37 +++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a..b04be921f4 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.types import RoomID, UserID +from synapse.types import Requester, RoomID, UserID from tests import unittest from tests.utils import create_room, setup_test_homeserver @@ -84,3 +84,38 @@ class RoomMemberStoreTestCase(unittest.TestCase): ) ], ) + + +class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.room_creator = homeserver.get_room_creation_handler() + + def test_can_rerun_update(self): + # First make sure we have completed all updates. + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + # Now let's create a room, which will insert a membership + user = UserID("alice", "test") + requester = Requester(user, None, False, None, None) + self.get_success(self.room_creator.create_room(requester, {})) + + # Register the background update to run again. + self.get_success( + self.store._simple_insert( + table="background_updates", + values={ + "update_name": "current_state_events_membership", + "progress_json": "{}", + "depends_on": None, + }, + ) + ) + + # ...
and tell the DataStore that it hasn't finished all updates yet + self.store._all_done = False + + # Now let's actually drive the updates to completion + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) From adcd5368b0f58bc457b25e5af993c89f8daa9300 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 16:58:13 +0100 Subject: [PATCH 31/72] Newsfile --- changelog.d/5746.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5746.misc diff --git a/changelog.d/5746.misc b/changelog.d/5746.misc new file mode 100644 index 0000000000..5e15dfd5fa --- /dev/null +++ b/changelog.d/5746.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From 418635e68a127da24bf887adcbf19d3110c57630 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Jul 2019 11:33:13 +0100 Subject: [PATCH 32/72] Add a prometheus metric for active cache lookups. (#5750) * Add a prometheus metric for active cache lookups. * changelog --- changelog.d/5750.misc | 1 + synapse/util/caches/__init__.py | 17 ++++++++++++++++- synapse/util/caches/descriptors.py | 18 +++++++++++++++++- 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5750.misc diff --git a/changelog.d/5750.misc b/changelog.d/5750.misc new file mode 100644 index 0000000000..6beaa460a5 --- /dev/null +++ b/changelog.d/5750.misc @@ -0,0 +1 @@ +Add a prometheus metric for pending cache lookups. \ No newline at end of file diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 8271229015..b50e3503f0 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,7 +52,19 @@ response_cache_evicted = Gauge( response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) -def register_cache(cache_type, cache_name, cache): +def register_cache(cache_type, cache_name, cache, collect_callback=None): + """Register a cache object for metric collection. + + Args: + cache_type (str): + cache_name (str): name of the cache + cache (object): cache itself + collect_callback (callable|None): if not None, a function which is called during + metric collection to update additional metrics. + + Returns: + CacheMetric: an object which provides inc_{hits,misses,evictions} methods + """ # Check if the metric is already registered. Unregister it, if so. 
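The `collect_callback` hook added here follows the usual prometheus pattern of computing gauge values lazily at scrape time rather than updating a counter on every cache operation. A minimal, self-contained sketch of the same idea (the `ToyCache` class and metric name are invented for illustration and are not part of this patch):

```python
from prometheus_client import Gauge

# Illustrative metric; the real one added later in this patch is
# synapse_util_caches_cache_pending.
pending_lookups = Gauge(
    "toy_cache_pending_lookups", "Lookups currently in flight", ["name"]
)


class ToyCache:
    """Minimal cache exposing a collect_callback-style hook."""

    def __init__(self, name):
        self.name = name
        self._pending_deferred_cache = {}  # key -> in-flight lookup

    def metrics_collection_callback(self):
        # Runs at scrape time, so the gauge reflects the number of
        # in-flight lookups at the moment prometheus asks for it.
        pending_lookups.labels(self.name).set(len(self._pending_deferred_cache))
```

As the hunk below shows, `register_cache` simply invokes the callback (if one was supplied) each time it collects the standard hit/miss/eviction gauges.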
# This usually happens during tests, as at runtime these caches are @@ -90,6 +103,8 @@ def register_cache(cache_type, cache_name, cache): cache_hits.labels(cache_name).set(self.hits) cache_evicted.labels(cache_name).set(self.evicted_size) cache_total.labels(cache_name).set(self.hits + self.misses) + if collect_callback: + collect_callback() except Exception as e: logger.warn("Error calculating metrics for %s: %s", cache_name, e) raise diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index a1acacbde9..7e69cf55fb 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -22,6 +22,8 @@ from collections import namedtuple import six from six import itervalues, string_types +from prometheus_client import Gauge + from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn @@ -37,6 +39,12 @@ from . import register_cache logger = logging.getLogger(__name__) +cache_pending_metric = Gauge( + "synapse_util_caches_cache_pending", + "Number of lookups currently pending for this cache", + ["name"], +) + _CacheSentinel = object() @@ -82,11 +90,19 @@ class Cache(object): self.name = name self.keylen = keylen self.thread = None - self.metrics = register_cache("cache", name, self.cache) + self.metrics = register_cache( + "cache", + name, + self.cache, + collect_callback=self._metrics_collection_callback, + ) def _on_evicted(self, evicted_count): self.metrics.inc_evictions(evicted_count) + def _metrics_collection_callback(self): + cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) + def check_thread(self): expected_thread = self.thread if expected_thread is None: From 32768e96d44d0f3febae8c372e8c1569ea31788e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:37:43 +0100 Subject: [PATCH 33/72] Add function to get all forgotten rooms for user This will allow us to efficiently filter out rooms that have been forgotten in other queries without having to join against the `room_memberships` table. --- synapse/storage/roommember.py | 43 +++++++++++++++++++ .../schema/delta/56/room_membership_idx.sql | 25 +++++++++++ 2 files changed, 68 insertions(+) create mode 100644 synapse/storage/schema/delta/56/room_membership_idx.sql diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b3c002b9eb..bc77705e97 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -639,6 +639,39 @@ class RoomMemberWorkerStore(EventsWorkerStore): count = yield self.runInteraction("did_forget_membership", f) defer.returnValue(count == 0) + @cached() + def get_forgotten_rooms_for_user(self, user_id): + """Gets all rooms the user has forgotten. + + Args: + user_id (str) + + Returns: + Deferred[set[str]] + """ + + def _get_forgotten_rooms_for_user_txn(txn): + # This is a slightly convoluted query that first looks up all rooms + # that the user has forgotten in the past, then rechecks that list + # to see if any have subsequently been updated. This is done so that + # we can use a partial index on `forgotten = 1` on the assumption + # that few users will actually forget many rooms. + sql = """ + SELECT room_id, ( + SELECT count(*) FROM room_memberships + WHERE room_id = m.room_id AND user_id = m.user_id AND forgotten = 0 + ) AS count + FROM room_memberships AS m + WHERE user_id = ? 
AND forgotten = 1 + GROUP BY room_id, user_id; + """ + txn.execute(sql, (user_id,)) + return set(row[0] for row in txn if row[1] == 0) + + return self.runInteraction( + "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn + ) + @defer.inlineCallbacks def get_rooms_user_has_been_in(self, user_id): """Get all rooms that the user has ever been in. @@ -670,6 +703,13 @@ class RoomMemberStore(RoomMemberWorkerStore): _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, self._background_current_state_membership, ) + self.register_background_index_update( + "room_membership_forgotten_idx", + index_name="room_memberships_user_room_forgotten", + table="room_memberships", + columns=["user_id", "room_id"], + where_clause="forgotten = 1", + ) def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database. @@ -771,6 +811,9 @@ class RoomMemberStore(RoomMemberWorkerStore): txn.execute(sql, (user_id, room_id)) self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) + self._invalidate_cache_and_stream( + txn, self.get_forgotten_rooms_for_user, (user_id,) + ) return self.runInteraction("forget_membership", f) diff --git a/synapse/storage/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/delta/56/room_membership_idx.sql new file mode 100644 index 0000000000..fc0b498843 --- /dev/null +++ b/synapse/storage/schema/delta/56/room_membership_idx.sql @@ -0,0 +1,25 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- We add membership to current state so that we don't need to join against +-- room_memberships, which can be surprisingly costly (we do such queries +-- very frequently). +-- This will be null for non-membership events and the content.membership key +-- for membership events. (Will also be null for membership events until the +-- background update job has finished). + +-- Adds an index on room_memberships for fetching all forgotten rooms for a user +INSERT INTO background_updates (update_name, progress_json) VALUES + ('room_membership_forgotten_idx', '{}'); From 62921fb53e773c0510aacf36345c5301b4688088 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:45:58 +0100 Subject: [PATCH 34/72] Remove join on room_memberships when fetching rooms for user. --- synapse/storage/roommember.py | 58 ++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index bc77705e97..7852d3866a 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -256,28 +256,35 @@ class RoomMemberWorkerStore(EventsWorkerStore): defer.returnValue(invite) defer.returnValue(None) + @defer.inlineCallbacks def get_rooms_for_user_where_membership_is(self, user_id, membership_list): """ Get all the rooms for this user where the membership for this user matches one in the membership list. + Filters out forgotten rooms. + Args: user_id (str): The user ID. 
membership_list (list): A list of synapse.api.constants.Membership values which the user must be in. + Returns: - A list of dictionary objects, with room_id, membership and sender - defined. + Deferred[list[RoomsForUser]] """ if not membership_list: return defer.succeed(None) - return self.runInteraction( + rooms = yield self.runInteraction( "get_rooms_for_user_where_membership_is", self._get_rooms_for_user_where_membership_is_txn, user_id, membership_list, ) + # Now we filter out forgotten rooms + forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id) + return [room for room in rooms if room.room_id not in forgotten_rooms] + def _get_rooms_for_user_where_membership_is_txn( self, txn, user_id, membership_list ): @@ -287,26 +294,33 @@ class RoomMemberWorkerStore(EventsWorkerStore): results = [] if membership_list: - where_clause = "user_id = ? AND (%s) AND forgotten = 0" % ( - " OR ".join(["m.membership = ?" for _ in membership_list]), - ) + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering + FROM current_state_events AS c + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND state_key = ? + AND c.membership IN (%s) + """ % ( + ",".join("?" * len(membership_list)) + ) + else: + sql = """ + SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering + FROM current_state_events AS c + INNER JOIN room_memberships AS m USING (room_id, event_id) + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND state_key = ? + AND m.membership IN (%s) + """ % ( + ",".join("?" * len(membership_list)) + ) - args = [user_id] - args.extend(membership_list) - - sql = ( - "SELECT m.room_id, m.sender, m.membership, m.event_id, e.stream_ordering" - " FROM current_state_events as c" - " INNER JOIN room_memberships as m" - " ON m.event_id = c.event_id" - " INNER JOIN events as e" - " ON e.event_id = c.event_id" - " AND m.room_id = c.room_id" - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND %s" - ) % (where_clause,) - - txn.execute(sql, args) + txn.execute(sql, (user_id, *membership_list)) results = [RoomsForUser(**r) for r in self.cursor_to_dict(txn)] if do_invite: From 0c4a99607e839c5a363351238a6a555015c8ddfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:49:15 +0100 Subject: [PATCH 35/72] Remove join when calculating room summaries. --- synapse/storage/roommember.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 7852d3866a..bfb834ccca 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -179,19 +179,27 @@ class RoomMemberWorkerStore(EventsWorkerStore): # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent - sql = """ - SELECT m.user_id, m.membership, m.event_id - FROM room_memberships as m - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? - ORDER BY - CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, - m.event_id ASC - LIMIT ? - """ + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT state_key, membership, event_id + FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? + ORDER BY + CASE membership WHEN ? THEN 1 WHEN ? 
THEN 2 ELSE 3 END ASC, + event_id ASC + LIMIT ? + """ + else: + sql = """ + SELECT c.state_key, m.membership, c.event_id + FROM room_memberships as m + INNER JOIN current_state_events as c USING (room_id, event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? + ORDER BY + CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + c.event_id ASC + LIMIT ? + """ # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) From c1598030671ee39364863fef579d041989bf852e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:51:44 +0100 Subject: [PATCH 36/72] Newsfile --- changelog.d/5752.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5752.misc diff --git a/changelog.d/5752.misc b/changelog.d/5752.misc new file mode 100644 index 0000000000..5e15dfd5fa --- /dev/null +++ b/changelog.d/5752.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From cf2972c818344214244961e6175f559a6b59123b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 24 Jul 2019 13:07:35 +0100 Subject: [PATCH 37/72] Fix servlet metric names (#5734) * Fix servlet metric names Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> * Remove redundant check * Cover all return paths --- changelog.d/5734.bugfix | 1 + synapse/federation/transport/server.py | 4 +- synapse/http/server.py | 41 ++++++++++++++------- synapse/http/servlet.py | 4 +- synapse/replication/http/_base.py | 2 +- synapse/rest/admin/server_notice_servlet.py | 9 ++++- synapse/rest/client/v1/room.py | 37 +++++++++++++++---- synapse/rest/client/v2_alpha/relations.py | 2 + tests/test_server.py | 21 ++++++++--- tests/utils.py | 2 +- 10 files changed, 92 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5734.bugfix diff --git a/changelog.d/5734.bugfix b/changelog.d/5734.bugfix new file mode 100644 index 0000000000..33aea5a94c --- /dev/null +++ b/changelog.d/5734.bugfix @@ -0,0 +1 @@ +Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 663264dec4..ea4e1b6d0f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -325,7 +325,9 @@ class BaseFederationServlet(object): if code is None: continue - server.register_paths(method, (pattern,), self._wrap(code)) + server.register_paths( + method, (pattern,), self._wrap(code), self.__class__.__name__ + ) class FederationSendServlet(BaseFederationServlet): diff --git a/synapse/http/server.py b/synapse/http/server.py index 72a3d67eb6..e6f351ba3b 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -245,7 +245,9 @@ class JsonResource(HttpServer, resource.Resource): isLeaf = True - _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"]) + _PathEntry = collections.namedtuple( + "_PathEntry", ["pattern", "callback", "servlet_classname"] + ) def __init__(self, hs, canonical_json=True): resource.Resource.__init__(self) @@ -255,12 +257,28 @@ class JsonResource(HttpServer, resource.Resource): self.path_regexs = {} self.hs = hs - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_classname): + """ + Registers a request handler against a regular expression. 
Later request URLs are + checked against these regular expressions in order to identify an appropriate + handler for that request. + + Args: + method (str): GET, POST etc + + path_patterns (Iterable[str]): A list of regular expressions to which + the request URLs are compared. + + callback (function): The handler for the request. Usually a Servlet + + servlet_classname (str): The name of the handler to be used in prometheus + and opentracing logs. + """ method = method.encode("utf-8") # method is bytes on py3 for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) self.path_regexs.setdefault(method, []).append( - self._PathEntry(path_pattern, callback) + self._PathEntry(path_pattern, callback, servlet_classname) ) def render(self, request): @@ -275,13 +293,9 @@ class JsonResource(HttpServer, resource.Resource): This checks if anyone has registered a callback for that method and path. """ - callback, group_dict = self._get_handler_for_request(request) + callback, servlet_classname, group_dict = self._get_handler_for_request(request) - servlet_instance = getattr(callback, "__self__", None) - if servlet_instance is not None: - servlet_classname = servlet_instance.__class__.__name__ - else: - servlet_classname = "%r" % callback + # Make sure we have a name for this handler in prometheus. request.request_metrics.name = servlet_classname # Now trigger the callback. If it returns a response, we send it @@ -311,7 +325,8 @@ class JsonResource(HttpServer, resource.Resource): request (twisted.web.http.Request): Returns: - Tuple[Callable, dict[unicode, unicode]]: callback method, and the + Tuple[Callable, str, dict[unicode, unicode]]: callback method, the + label to use for that method in prometheus metrics, and the dict mapping keys to path components as specified in the handler's path match regexp. @@ -320,7 +335,7 @@ class JsonResource(HttpServer, resource.Resource): None, or a tuple of (http code, response body). """ if request.method == b"OPTIONS": - return _options_handler, {} + return _options_handler, "options_request_handler", {} # Loop through all the registered callbacks to check if the method # and path regex match @@ -328,10 +343,10 @@ class JsonResource(HttpServer, resource.Resource): m = path_entry.pattern.match(request.path.decode("ascii")) if m: # We found a match! - return path_entry.callback, m.groupdict() + return path_entry.callback, path_entry.servlet_classname, m.groupdict() # Huh. No one wanted to handle that? Fiiiiiine. Send 400. 
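The essential change in this hunk is that the dispatch tuple now carries an explicit servlet name alongside the callback, instead of reverse-engineering a name from the callback object. A rough standalone sketch of that shape (the route, handler, and servlet name here are invented for illustration):

```python
import re
from collections import namedtuple

_PathEntry = namedtuple("_PathEntry", ["pattern", "callback", "servlet_classname"])

# Invented route table mirroring the shape of JsonResource.path_regexs.
_routes = {
    b"GET": [
        _PathEntry(
            re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$"),
            lambda request, room_id: (200, {"room_id": room_id}),
            "FooRestServlet",
        )
    ]
}


def get_handler_for(method, path):
    """Return (callback, metrics_name, captured_groups)."""
    for entry in _routes.get(method, []):
        match = entry.pattern.match(path)
        if match:
            return entry.callback, entry.servlet_classname, match.groupdict()
    # No match: fall back to a fixed name so the metric label stays bounded.
    return None, "unrecognised_request_handler", {}


callback, name, groups = get_handler_for(b"GET", "/_matrix/foo/abc")
assert name == "FooRestServlet" and groups == {"room_id": "abc"}
```

Keeping the fallback names fixed ("options_request_handler", "unrecognised_request_handler") matters for prometheus: unbounded label values would blow up the metric cardinality.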
- return _unrecognised_request_handler, {} + return _unrecognised_request_handler, "unrecognised_request_handler", {} def _send_response( self, request, code, response_json_object, response_code_message=None diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 889038ff25..f0ca7d9aba 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -290,11 +290,13 @@ class RestServlet(object): for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"): if hasattr(self, "on_%s" % (method,)): + servlet_classname = self.__class__.__name__ method_handler = getattr(self, "on_%s" % (method,)) http_server.register_paths( method, patterns, - trace_servlet(self.__class__.__name__, method_handler), + trace_servlet(servlet_classname, method_handler), + servlet_classname, ) else: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279f..43c89e36dd 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -205,7 +205,7 @@ class ReplicationEndpoint(object): args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args) pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args)) - http_server.register_paths(method, [pattern], handler) + http_server.register_paths(method, [pattern], handler, self.__class__.__name__) def _cached_handler(self, request, txn_id, **kwargs): """Called on new incoming requests when caching is enabled. Checks diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d..d9c71261f2 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -59,9 +59,14 @@ class SendServerNoticeServlet(RestServlet): def register(self, json_resource): PATTERN = "^/_synapse/admin/v1/send_server_notice" - json_resource.register_paths("POST", (re.compile(PATTERN + "$"),), self.on_POST) json_resource.register_paths( - "PUT", (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),), self.on_PUT + "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__ + ) + json_resource.register_paths( + "PUT", + (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),), + self.on_PUT, + self.__class__.__name__, ) @defer.inlineCallbacks diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705..6276e97f89 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -67,11 +67,17 @@ class RoomCreateRestServlet(TransactionRestServlet): register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths( - "OPTIONS", client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/rooms(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) # define CORS for /createRoom[/txnid] http_server.register_paths( - "OPTIONS", client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/createRoom(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) def on_PUT(self, request, txn_id): @@ -116,16 +122,28 @@ class RoomStateEventRestServlet(TransactionRestServlet): ) http_server.register_paths( - "GET", client_patterns(state_key, v1=True), self.on_GET + "GET", + client_patterns(state_key, v1=True), + self.on_GET, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(state_key, v1=True), self.on_PUT + "PUT", + client_patterns(state_key, v1=True), + self.on_PUT, +
self.__class__.__name__, ) http_server.register_paths( - "GET", client_patterns(no_state_key, v1=True), self.on_GET_no_state_key + "GET", + client_patterns(no_state_key, v1=True), + self.on_GET_no_state_key, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key + "PUT", + client_patterns(no_state_key, v1=True), + self.on_PUT_no_state_key, + self.__class__.__name__, ) def on_GET_no_state_key(self, request, room_id, event_type): @@ -845,18 +863,23 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): with_get: True to also register respective GET paths for the PUTs. """ http_server.register_paths( - "POST", client_patterns(regex_string + "$", v1=True), servlet.on_POST + "POST", + client_patterns(regex_string + "$", v1=True), + servlet.on_POST, + servlet.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True), servlet.on_PUT, + servlet.__class__.__name__, ) if with_get: http_server.register_paths( "GET", client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True), servlet.on_GET, + servlet.__class__.__name__, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284..9e9a639055 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -72,11 +72,13 @@ class RelationSendServlet(RestServlet): "POST", client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, + self.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(self.PATTERN + "/(?P<txn_id>[^/]*)$", releases=()), self.on_PUT, + self.__class__.__name__, ) def on_PUT(self, request, *args, **kwargs): diff --git a/tests/test_server.py b/tests/test_server.py index ba08483a4b..2a7d407c98 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -61,7 +61,10 @@ class JsonResourceTests(unittest.TestCase): res = JsonResource(self.homeserver) res.register_paths( - "GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback + "GET", + [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], + _callback, + "test_servlet", ) request, channel = make_request( @@ -82,7 +85,9 @@ class JsonResourceTests(unittest.TestCase): raise Exception("boo") res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -105,7 +110,9 @@ return make_deferred_yieldable(d) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -122,7 +129,9 @@ raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -143,7 +152,9 @@ self.fail("shouldn't ever get here") res =
JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar") render(request, res, self.reactor) diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b47..99a3deae21 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -471,7 +471,7 @@ class MockHttpResource(HttpServer): raise KeyError("No event can handle %s" % path) - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_name): for path_pattern in path_patterns: self.callbacks.append((method, path_pattern, callback)) From f30a71a67b6605cb0f09975af3befc61090326bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Jul 2019 13:16:18 +0100 Subject: [PATCH 38/72] Stop trying to fetch events with event_id=None. (#5753) `None` is not a valid event id, so queuing up a database fetch for it seems like a silly thing to do. I considered making `get_event` return `None` if `event_id is None`, but then its interaction with `allow_none` seemed uninituitive, and strong typing ftw. --- changelog.d/5753.misc | 1 + synapse/handlers/message.py | 8 +++++++- synapse/storage/events_worker.py | 5 ++++- synapse/storage/stats.py | 20 +++++++++++--------- 4 files changed, 23 insertions(+), 11 deletions(-) create mode 100644 changelog.d/5753.misc diff --git a/changelog.d/5753.misc b/changelog.d/5753.misc new file mode 100644 index 0000000000..22bba9ce3c --- /dev/null +++ b/changelog.d/5753.misc @@ -0,0 +1 @@ +Stop trying to fetch events with event_id=None. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8b27e23378..e951c39fa7 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -378,7 +378,11 @@ class EventCreationHandler(object): # tolerate them in event_auth.check(). prev_state_ids = yield context.get_prev_state_ids(self.store) prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) - prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + prev_event = ( + yield self.store.get_event(prev_event_id, allow_none=True) + if prev_event_id + else None + ) if not prev_event or prev_event.membership != Membership.JOIN: logger.warning( ( @@ -521,6 +525,8 @@ class EventCreationHandler(object): """ prev_state_ids = yield context.get_prev_state_ids(self.store) prev_event_id = prev_state_ids.get((event.type, event.state_key)) + if not prev_event_id: + return prev_event = yield self.store.get_event(prev_event_id, allow_none=True) if not prev_event: return diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 44441957db..83fe4764d8 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -139,8 +139,11 @@ class EventsWorkerStore(SQLBaseStore): If there is a mismatch, behave as per allow_none. Returns: - Deferred : A FrozenEvent. 
+ Deferred[EventBase|None] """ + if not isinstance(event_id, str): + raise TypeError("Invalid event_id %r" % (event_id,)) + events = yield self.get_events_as_list( [event_id], check_redacted=check_redacted, diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index e893b05ee7..e13efed417 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -211,16 +211,18 @@ class StatsStore(StateDeltasStore): avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) + event_ids = [ + join_rules_id, + history_visibility_id, + encryption_id, + name_id, + topic_id, + avatar_id, + canonical_alias_id, + ] + state_events = yield self.get_events( - [ - join_rules_id, - history_visibility_id, - encryption_id, - name_id, - topic_id, - avatar_id, - canonical_alias_id, - ] + [ev for ev in event_ids if ev is not None] ) def _get_or_none(event_id, arg): From 2d573e2e2b7127882ab2c221fd554371e0274c76 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 24 Jul 2019 13:38:33 +0100 Subject: [PATCH 39/72] 1.2.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/5734.bugfix | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/5734.bugfix diff --git a/CHANGES.md b/CHANGES.md index bb6bcb75ed..be3dc37137 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.2.0rc2 (2019-07-24) +============================= + +Bugfixes +-------- + +- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734)) + + Synapse 1.2.0rc1 (2019-07-22) ============================= diff --git a/changelog.d/5734.bugfix b/changelog.d/5734.bugfix deleted file mode 100644 index 33aea5a94c..0000000000 --- a/changelog.d/5734.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. diff --git a/synapse/__init__.py b/synapse/__init__.py index f26e49da36..ca14545e4d 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ try: except ImportError: pass -__version__ = "1.2.0rc1" +__version__ = "1.2.0rc2" From c0a1301ccd91cdfbb079f675b3c3dc305f876be7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 25 Jul 2019 14:10:32 +0100 Subject: [PATCH 40/72] 1.2.0 --- CHANGES.md | 6 ++++++ debian/changelog | 7 +++++-- synapse/__init__.py | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index be3dc37137..d655723326 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.2.0 (2019-07-25) +========================== + +No significant changes. + + Synapse 1.2.0rc2 (2019-07-24) ============================= diff --git a/debian/changelog b/debian/changelog index 8aba444f1d..aafdd1cde2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.2.0) stable; urgency=medium [ Amber Brown ] * Update logging config defaults to match API changes in Synapse. @@ -6,7 +6,10 @@ matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium [ Richard van der Hoff ] * Add Recommends and Depends for some libraries which you probably want. - -- Erik Johnston Thu, 04 Jul 2019 13:59:02 +0100 + [ Synapse Packaging team ] + * New synapse release 1.2.0.
+ + -- Synapse Packaging team Thu, 25 Jul 2019 14:10:07 +0100 matrix-synapse-py3 (1.1.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index ca14545e4d..3435de4e2f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ try: except ImportError: pass -__version__ = "1.2.0rc2" +__version__ = "1.2.0" From 618bd1ee76a83bd29beb208e9b7097ffcd787099 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 25 Jul 2019 15:59:45 +0100 Subject: [PATCH 41/72] Fix some error cases in the caching layer. (#5749) There was some inconsistent behaviour in the caching layer around how exceptions were handled - particularly synchronously-thrown ones. This seems to be most easily handled by pushing the creation of ObservableDeferreds down from CacheDescriptor to the Cache. --- changelog.d/5749.misc | 1 + synapse/util/caches/descriptors.py | 74 ++++++++++++---------- tests/util/caches/test_descriptors.py | 90 ++++++++++++++++++++++++++- 3 files changed, 130 insertions(+), 35 deletions(-) create mode 100644 changelog.d/5749.misc diff --git a/changelog.d/5749.misc b/changelog.d/5749.misc new file mode 100644 index 0000000000..48dd61f461 --- /dev/null +++ b/changelog.d/5749.misc @@ -0,0 +1 @@ +Fix some error cases in the caching layer. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7e69cf55fb..43f66ec4be 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -19,8 +19,7 @@ import logging import threading from collections import namedtuple -import six -from six import itervalues, string_types +from six import itervalues from prometheus_client import Gauge @@ -32,7 +31,6 @@ from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry -from synapse.util.stringutils import to_ascii from . import register_cache @@ -124,7 +122,7 @@ class Cache(object): update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either a Deferred or the raw result + Either an ObservableDeferred or the raw result """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) @@ -148,9 +146,14 @@ class Cache(object): return default def set(self, key, value, callback=None): + if not isinstance(value, defer.Deferred): + raise TypeError("not a Deferred") + callbacks = [callback] if callback else [] self.check_thread() - entry = CacheEntry(deferred=value, callbacks=callbacks) + observable = ObservableDeferred(value, consumeErrors=True) + observer = defer.maybeDeferred(observable.observe) + entry = CacheEntry(deferred=observable, callbacks=callbacks) existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: @@ -158,20 +161,31 @@ class Cache(object): self._pending_deferred_cache[key] = entry - def shuffle(result): + def compare_and_pop(): + """Check if our entry is still the one in _pending_deferred_cache, and + if so, pop it. + + Returns true if the entries matched. + """ existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry is entry: + return True + + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. 
(We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + return False + + def cb(result): + if compare_and_pop(): self.cache.set(key, result, entry.callbacks) else: - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - # we're not going to put this entry into the cache, so need # to make sure that the invalidation callbacks are called. # That was probably done when _pending_deferred_cache was @@ -179,9 +193,16 @@ class Cache(object): # `invalidate` being previously called, in which case it may # not have been. Either way, let's double-check now. entry.invalidate() - return result - entry.deferred.addCallback(shuffle) + def eb(_fail): + compare_and_pop() + entry.invalidate() + + # once the deferred completes, we can move the entry from the + # _pending_deferred_cache to the real cache. + # + observer.addCallbacks(cb, eb) + return observable def prefill(self, key, value, callback=None): callbacks = [callback] if callback else [] @@ -414,20 +435,10 @@ class CacheDescriptor(_CacheDescriptorBase): ret.addErrback(onErr) - # If our cache_key is a string on py2, try to convert to ascii - # to save a bit of space in large caches. Py3 does this - # internally automatically. - if six.PY2 and isinstance(cache_key, string_types): - cache_key = to_ascii(cache_key) - - result_d = ObservableDeferred(ret, consumeErrors=True) - cache.set(cache_key, result_d, callback=invalidate_callback) + result_d = cache.set(cache_key, ret, callback=invalidate_callback) observer = result_d.observe() - if isinstance(observer, defer.Deferred): - return make_deferred_yieldable(observer) - else: - return observer + return make_deferred_yieldable(observer) if self.num_args == 1: wrapped.invalidate = lambda key: cache.invalidate(key[0]) @@ -543,7 +554,7 @@ class CacheListDescriptor(_CacheDescriptorBase): missing.add(arg) if missing: - # we need an observable deferred for each entry in the list, + # we need a deferred for each entry in the list, # which we put in the cache. Each deferred resolves with the # relevant result for that key. deferreds_map = {} @@ -551,8 +562,7 @@ class CacheListDescriptor(_CacheDescriptorBase): deferred = defer.Deferred() deferreds_map[arg] = deferred key = arg_to_cache_key(arg) - observable = ObservableDeferred(deferred) - cache.set(key, observable, callback=invalidate_callback) + cache.set(key, deferred, callback=invalidate_callback) def complete_all(res): # the wrapped function has completed. 
It returns a diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 56320bbaf9..5713870f48 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,6 +27,7 @@ from synapse.logging.context import ( make_deferred_yieldable, ) from synapse.util.caches import descriptors +from synapse.util.caches.descriptors import cached from tests import unittest @@ -55,12 +56,15 @@ class CacheTestCase(unittest.TestCase): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return the deferreds - self.assertIs(cache.get("key1"), d1) - self.assertIs(cache.get("key2"), d2) + # lookup should return observable deferreds + self.assertFalse(cache.get("key1").has_called()) + self.assertFalse(cache.get("key2").has_called()) # let one of the lookups complete d2.callback("result2") + + # for now at least, the cache will return real results rather than an + # observabledeferred self.assertEqual(cache.get("key2"), "result2") # now do the invalidation @@ -146,6 +150,28 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @cached() + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + def test_cache_logcontexts(self): """Check that logcontexts are set and restored correctly when using the cache.""" @@ -222,6 +248,9 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(LoggingContext.current_context(), c1) + # the cache should now be empty + self.assertEqual(len(obj.fn.cache.cache), 0) + obj = Cls() # set off a deferred which will do a cache lookup @@ -268,6 +297,61 @@ class DescriptorTestCase(unittest.TestCase): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_iterable(self): + class Cls(object): + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached(iterable=True) + def fn(self, arg1, arg2): + return self.mock(arg1, arg2) + + obj = Cls() + + obj.mock.return_value = ["spam", "eggs"] + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + obj.mock.assert_called_once_with(1, 2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = ["chips"] + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_called_once_with(1, 3) + obj.mock.reset_mock() + + # the two values should now be cached + self.assertEqual(len(obj.fn.cache.cache), 3) + + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_not_called() + + def test_cache_iterable_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @descriptors.cached(iterable=True) + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... 
leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks From b1605cdd23a37701c55906c9118e43b4d32ceb7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 22:44:39 +0100 Subject: [PATCH 42/72] log when a redaction attempts to redact an event in a different room --- changelog.d/5767.bugfix | 1 + synapse/storage/events_worker.py | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 changelog.d/5767.bugfix diff --git a/changelog.d/5767.bugfix b/changelog.d/5767.bugfix new file mode 100644 index 0000000000..1a76d02e32 --- /dev/null +++ b/changelog.d/5767.bugfix @@ -0,0 +1 @@ +Log when a redaction attempts to redact an event in a different room. diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 858fc755a1..7dbb5df09a 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -268,6 +268,14 @@ class EventsWorkerStore(SQLBaseStore): ) continue + if original_event.room_id != entry.event.room_id: + logger.info( + "Withholding redaction %s of event %s from a different room", + event_id, + redacted_event_id, + ) + continue + if entry.event.internal_metadata.need_to_check_redaction(): original_domain = get_domain_from_id(original_event.sender) redaction_domain = get_domain_from_id(entry.event.sender) @@ -636,9 +644,21 @@ class EventsWorkerStore(SQLBaseStore): if not redaction_entry: # we don't have the redaction event, or the redaction event was not # authorized. + logger.debug( + "%s was redacted by %s but redaction not found/authed", + original_ev.event_id, + redaction_id, + ) continue redaction_event = redaction_entry.event + if redaction_event.room_id != original_ev.room_id: + logger.debug( + "%s was redacted by %s but redaction was in a different room!", + original_ev.event_id, + redaction_id, + ) + continue # Starting in room version v3, some redactions need to be # rechecked if we didn't have the redacted event at the @@ -650,8 +670,15 @@ class EventsWorkerStore(SQLBaseStore): redaction_event.internal_metadata.recheck_redaction = False else: # Senders don't match, so the event isn't actually redacted + logger.debug( + "%s was redacted by %s but the senders don't match", + original_ev.event_id, + redaction_id, + ) continue + logger.debug("Redacting %s due to %s", original_ev.event_id, redaction_id) + # we found a good redaction event. Redact! redacted_event = prune_event(original_ev) redacted_event.unsigned["redacted_by"] = redaction_id From 0f2ecb961e580d5d039360edf041720680f8ad8c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 06:36:48 +0000 Subject: [PATCH 43/72] Fix DoS when there is a cycle in redaction events Make sure that synapse doesn't explode when a redaction redacts itself, or there is a larger cycle. --- synapse/storage/events_worker.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 7dbb5df09a..06379281b6 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -637,6 +637,10 @@ class EventsWorkerStore(SQLBaseStore): # we choose to ignore redactions of m.room.create events. return None + if original_ev.type == "m.room.redaction": + # ... 
and redaction events + return None + redaction_map = yield self._get_events_from_cache_or_db(redactions) for redaction_id in redactions: From 1cad8d7b6f736d86bd53b7f5e8b8417c302fdbd1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 26 Jul 2019 07:38:55 +0100 Subject: [PATCH 44/72] Convert RedactionTestCase to modern test style (#5768) --- changelog.d/5768.misc | 1 + tests/storage/test_redaction.py | 76 +++++++++++++++++---------------- 2 files changed, 40 insertions(+), 37 deletions(-) create mode 100644 changelog.d/5768.misc diff --git a/changelog.d/5768.misc b/changelog.d/5768.misc new file mode 100644 index 0000000000..7a9c88b4c2 --- /dev/null +++ b/changelog.d/5768.misc @@ -0,0 +1 @@ +Convert RedactionTestCase to modern test style. diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 1cb471205b..8488b6edc8 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,23 +17,21 @@ from mock import Mock -from twisted.internet import defer - from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.types import RoomID, UserID from tests import unittest -from tests.utils import create_room, setup_test_homeserver +from tests.utils import create_room -class RedactionTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - hs = yield setup_test_homeserver( - self.addCleanup, resource_for_federation=Mock(), http_client=None +class RedactionTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + resource_for_federation=Mock(), http_client=None ) + def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() self.event_builder_factory = hs.get_event_builder_factory() self.event_creation_handler = hs.get_event_creation_handler() @@ -42,11 +41,12 @@ class RedactionTestCase(unittest.TestCase): self.room1 = RoomID.from_string("!abc123:test") - yield create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + self.get_success( + create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + ) self.depth = 1 - @defer.inlineCallbacks def inject_room_member( self, room, user, membership, replaces_state=None, extra_content={} ): @@ -63,15 +63,14 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_message(self, room, user, body): self.depth += 1 @@ -86,15 +85,14 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): builder = 
self.event_builder_factory.for_room_version( RoomVersions.V1, @@ -108,20 +106,21 @@ class RedactionTestCase(unittest.TestCase): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) - @defer.inlineCallbacks def test_redact(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) - msg_event = yield self.inject_message(self.room1, self.u_alice, "t") + msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t")) # Check event has not been redacted: - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -136,11 +135,11 @@ class RedactionTestCase(unittest.TestCase): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason) ) - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertEqual(msg_event.event_id, event.event_id) @@ -164,15 +163,18 @@ class RedactionTestCase(unittest.TestCase): event.unsigned["redacted_because"], ) - @defer.inlineCallbacks def test_redact_join(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) - - msg_event = yield self.inject_room_member( - self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) ) - event = yield self.store.get_event(msg_event.event_id) + msg_event = self.get_success( + self.inject_room_member( + self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + ) + ) + + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -187,13 +189,13 @@ class RedactionTestCase(unittest.TestCase): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason) ) # Check redaction - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertTrue("redacted_because" in event.unsigned) From 1f8bae7724713a36b7602fde439f316f1ff5b8cf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 13:31:03 +0100 Subject: [PATCH 45/72] Log when we receive receipt from a different origin --- synapse/handlers/receipts.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a85dd8cdee..e58bf7e360 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer from synapse.handlers._base import BaseHandler -from synapse.types import ReadReceipt +from synapse.types import ReadReceipt, get_domain_from_id logger = logging.getLogger(__name__) @@ -40,18 +40,27 @@ class ReceiptsHandler(BaseHandler): def 
_received_remote_receipt(self, origin, content): """Called when we receive an EDU of type m.receipt from a remote HS. """ - receipts = [ - ReadReceipt( - room_id=room_id, - receipt_type=receipt_type, - user_id=user_id, - event_ids=user_values["event_ids"], - data=user_values.get("data", {}), - ) - for room_id, room_values in content.items() - for receipt_type, users in room_values.items() - for user_id, user_values in users.items() - ] + receipts = [] + for room_id, room_values in content.items(): + for receipt_type, users in room_values.items(): + for user_id, user_values in users.items(): + if get_domain_from_id(user_id) != origin: + logger.info( + "Received receipt for user %r from server %s, ignoring", + user_id, + origin, + ) + continue + + receipts.append( + ReadReceipt( + room_id=room_id, + receipt_type=receipt_type, + user_id=user_id, + event_ids=user_values["event_ids"], + data=user_values.get("data", {}), + ) + ) yield self._handle_new_receipts(receipts) From d1020653fcbecabcf8e109dafc6258b1f2c2afd0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 10:08:22 +0100 Subject: [PATCH 46/72] Log when we receive a /make_* request from a different origin --- changelog.d/5744.bugfix | 1 + synapse/federation/federation_server.py | 4 +-- synapse/handlers/federation.py | 37 +++++++++++++++++++++++-- 3 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5744.bugfix diff --git a/changelog.d/5744.bugfix b/changelog.d/5744.bugfix new file mode 100644 index 0000000000..7b67ebb2d3 --- /dev/null +++ b/changelog.d/5744.bugfix @@ -0,0 +1 @@ +Log when we receive a `/make_*` request from a different origin. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8c0a18b120..ed2b6d5eef 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -369,7 +369,7 @@ class FederationServer(FederationBase): logger.warn("Room version %s not in %s", room_version, supported_versions) raise IncompatibleRoomVersionError(room_version=room_version) - pdu = yield self.handler.on_make_join_request(room_id, user_id) + pdu = yield self.handler.on_make_join_request(origin, room_id, user_id) time_now = self._clock.time_msec() defer.returnValue( {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @@ -423,7 +423,7 @@ class FederationServer(FederationBase): def on_make_leave_request(self, origin, room_id, user_id): origin_host, _ = parse_server_name(origin) yield self.check_server_matches_acl(origin_host, room_id) - pdu = yield self.handler.on_make_leave_request(room_id, user_id) + pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id) room_version = yield self.store.get_room_version(room_id) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57be968c67..30b69af82c 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1204,11 +1204,28 @@ class FederationHandler(BaseHandler): @defer.inlineCallbacks @log_function - def on_make_join_request(self, room_id, user_id): + def on_make_join_request(self, origin, room_id, user_id): """ We've received a /make_join/ request, so we create a partial join event for the room and return that. We do *not* persist or process it until the other server has signed it and sent it back. + + Args: + origin (str): The (verified) server name of the requesting server. 
+            room_id (str): Room to create join event in
+            user_id (str): The user to create the join for
+
+        Returns:
+            Deferred[FrozenEvent]
         """
+
+        if get_domain_from_id(user_id) != origin:
+            logger.info(
+                "Got /make_join request for user %r from different origin %s, ignoring",
+                user_id,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         event_content = {"membership": Membership.JOIN}
 
         room_version = yield self.store.get_room_version(room_id)
@@ -1411,11 +1428,27 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     @log_function
-    def on_make_leave_request(self, room_id, user_id):
+    def on_make_leave_request(self, origin, room_id, user_id):
         """ We've received a /make_leave/ request, so we create a partial
         leave event for the room and return that. We do *not* persist or
         process it until the other server has signed it and sent it back.
+
+        Args:
+            origin (str): The (verified) server name of the requesting server.
+            room_id (str): Room to create leave event in
+            user_id (str): The user to create the leave for
+
+        Returns:
+            Deferred[FrozenEvent]
         """
+        if get_domain_from_id(user_id) != origin:
+            logger.info(
+                "Got /make_leave request for user %r from different origin %s, ignoring",
+                user_id,
+                origin,
+            )
+            raise SynapseError(403, "User not from origin", Codes.FORBIDDEN)
+
         room_version = yield self.store.get_room_version(room_id)
         builder = self.event_builder_factory.new(
             room_version,

From 14c24c9037a7be46f9f79e85d2ce303ada4085e9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 26 Jul 2019 10:07:21 +0100
Subject: [PATCH 47/72] Fix room summary when rejected events are in state

Annoyingly, the `current_state_events` table can include rejected events,
in which case the membership column will be null. To work around this,
let's just always filter out null membership for now.
---
 synapse/storage/roommember.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index bfb834ccca..d0fe3a7f78 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -156,9 +156,12 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # then we can avoid a join, which is a Very Good Thing given how
         # frequently this function gets called.
         if self._current_state_events_membership_up_to_date:
+            # Note, rejected events will have a null membership field, so
+            # we manually filter them out.
             sql = """
                 SELECT count(*), membership FROM current_state_events
                 WHERE type = 'm.room.member' AND room_id = ?
+                    AND membership IS NOT NULL
                 GROUP BY membership
             """
         else:
@@ -180,10 +183,13 @@ class RoomMemberWorkerStore(EventsWorkerStore):
         # we order by membership and then fairly arbitrarily by event_id so
         # heroes are consistent
         if self._current_state_events_membership_up_to_date:
+            # Note, rejected events will have a null membership field, so
+            # we manually filter them out.
             sql = """
                 SELECT state_key, membership, event_id
                 FROM current_state_events
                 WHERE type = 'm.room.member' AND room_id = ?
+                    AND membership IS NOT NULL
                 ORDER BY
                    CASE membership WHEN ? THEN 1 WHEN ?
THEN 2 ELSE 3 END ASC, event_id ASC From 2e9cf7dda5aa5a13e434bf85733747d3d9c2d8e5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 26 Jul 2019 10:14:31 +0100 Subject: [PATCH 48/72] Newsfile --- changelog.d/5774.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5774.misc diff --git a/changelog.d/5774.misc b/changelog.d/5774.misc new file mode 100644 index 0000000000..5e15dfd5fa --- /dev/null +++ b/changelog.d/5774.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From dde6ea7ff6b18bb1da697a365326f34ea33baf88 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 11:33:16 +0100 Subject: [PATCH 49/72] 1.2.1 --- CHANGES.md | 24 +++++++++++++++++++++++- changelog.d/5744.bugfix | 1 - changelog.d/5767.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 5 files changed, 30 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5744.bugfix delete mode 100644 changelog.d/5767.bugfix diff --git a/CHANGES.md b/CHANGES.md index d655723326..ec79279539 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.2.1 (2019-07-26) +========================== + +Security update +--------------- + +This release includes *four* security fixes: + +- Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767)) +- Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961)) +- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @Dylanger for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744)) +- Fix a vulnerability where a federated server could spoof read-receipts from users on other servers. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) + +Note that Synapse 1.2.0 also contained a security fix which was not correctly identified during the original release. The changelog below is now updated. + Synapse 1.2.0 (2019-07-25) ========================== @@ -16,6 +31,14 @@ Bugfixes Synapse 1.2.0rc1 (2019-07-22) ============================= +Security fixes +-------------- + +This update included a security fix which was initially incorrectly flagged as +a regular bug fix. + +- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701)) + Features -------- @@ -41,7 +64,6 @@ Bugfixes - Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654)) - Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658)) - Fix some problems with authenticating redactions in recent room versions. 
([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707)) -- Ignore redactions of m.room.create events. ([\#5701](https://github.com/matrix-org/synapse/issues/5701)) Updates to the Docker image diff --git a/changelog.d/5744.bugfix b/changelog.d/5744.bugfix deleted file mode 100644 index 7b67ebb2d3..0000000000 --- a/changelog.d/5744.bugfix +++ /dev/null @@ -1 +0,0 @@ -Log when we receive a `/make_*` request from a different origin. diff --git a/changelog.d/5767.bugfix b/changelog.d/5767.bugfix deleted file mode 100644 index 1a76d02e32..0000000000 --- a/changelog.d/5767.bugfix +++ /dev/null @@ -1 +0,0 @@ -Log when a redaction attempts to redact an event in a different room. diff --git a/debian/changelog b/debian/changelog index aafdd1cde2..6634c1085a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.2.1) stable; urgency=medium + + * New synapse release 1.2.1. + + -- Synapse Packaging team Fri, 26 Jul 2019 11:32:47 +0100 + matrix-synapse-py3 (1.2.0) stable; urgency=medium [ Amber Brown ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 3435de4e2f..8301a13d8f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ try: except ImportError: pass -__version__ = "1.2.0" +__version__ = "1.2.1" From 8b16696b24a9cf22b3583c4dca733c0db527b4a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 11:36:28 +0100 Subject: [PATCH 50/72] correct attributions in changelog --- CHANGES.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ec79279539..2814d9586a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,8 @@ This release includes *four* security fixes: - Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767)) - Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961)) - Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @Dylanger for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744)) -- Fix a vulnerability where a federated server could spoof read-receipts from users on other servers. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) +- Fix a vulnerability where a federated server could spoof read-receipts from + users on other servers. Thanks to @Dylanger for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) Note that Synapse 1.2.0 also contained a security fix which was not correctly identified during the original release. The changelog below is now updated. 
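The security fixes listed in this release (and added in patches 45 and 46 above) share one underlying pattern: a federation request arrives with a verified `origin` (established by its request signature), and any user ID the payload claims to act for must belong to that origin. A minimal sketch of the check, assuming only `get_domain_from_id` from `synapse.types`; the function name and exception here are illustrative, not the actual Synapse code:

```python
from synapse.types import get_domain_from_id

def check_user_matches_origin(user_id, origin):
    """Illustrative only: reject federation payloads (receipts, /make_join,
    /make_leave, ...) that name a user belonging to a different server."""
    if get_domain_from_id(user_id) != origin:
        # Patches 45 and 46 above log this and then either skip the receipt
        # or respond 403 "User not from origin".
        raise PermissionError("user %r is not from origin %s" % (user_id, origin))
```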
From 992333b9954bec62e87cf5bf103fb5988d8bb063 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 11:36:28 +0100 Subject: [PATCH 51/72] correct attributions in changelog --- CHANGES.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 2814d9586a..fa753bed17 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -12,7 +12,10 @@ This release includes *four* security fixes: - Fix a vulnerability where a federated server could spoof read-receipts from users on other servers. Thanks to @Dylanger for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) -Note that Synapse 1.2.0 also contained a security fix which was not correctly identified during the original release. The changelog below is now updated. +Additionally, the following fix was in Synapse **1.2.0**, but was not correctly +identified during the original release: + +- It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701)) Synapse 1.2.0 (2019-07-25) ========================== From 97bf3077550915161765fdd1cf9290d8039a55f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Jul 2019 12:06:06 +0100 Subject: [PATCH 52/72] yet more changelog attribution fixes --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index fa753bed17..7bdc7ae6cc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,9 +8,9 @@ This release includes *four* security fixes: - Prevent an attack where a federated server could send redactions for arbitrary events in v1 and v2 rooms. ([\#5767](https://github.com/matrix-org/synapse/issues/5767)) - Prevent a denial-of-service attack where cycles of redaction events would make Synapse spin infinitely. Thanks to `@lrizika:matrix.org` for identifying and responsibly disclosing this issue. ([0f2ecb961](https://github.com/matrix-org/synapse/commit/0f2ecb961)) -- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @Dylanger for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744)) +- Prevent an attack where users could be joined or parted from public rooms without their consent. Thanks to @dylangerdaly for identifying and responsibly disclosing this issue. ([\#5744](https://github.com/matrix-org/synapse/issues/5744)) - Fix a vulnerability where a federated server could spoof read-receipts from - users on other servers. Thanks to @Dylanger for identifying this issue too. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) + users on other servers. Thanks to @dylangerdaly for identifying this issue too. 
([\#5743](https://github.com/matrix-org/synapse/issues/5743))
 
 Additionally, the following fix was in Synapse **1.2.0**, but was not correctly
 identified during the original release:
 
 - It was possible for a room moderator to send a redaction for an `m.room.create` event, which would downgrade the room to version 1. Thanks to `/dev/ponies` for identifying and responsibly disclosing this issue! ([\#5701](https://github.com/matrix-org/synapse/issues/5701))
 
 Synapse 1.2.0 (2019-07-25)
 ==========================

From 08352d44f81a76ba53fc96753cc5038589defaa7 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Fri, 26 Jul 2019 18:45:31 +0200
Subject: [PATCH 53/72] Add ability to pass arguments to looping calls

---
 synapse/util/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 841625a991..9e0a47d206 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -59,7 +59,7 @@ class Clock(object):
         """Returns the current system time in miliseconds since epoch."""
         return int(self.time() * 1000)
 
-    def looping_call(self, f, msec):
+    def looping_call(self, f, msec, *args):
         """Call a function repeatedly.
 
         Waits `msec` initially before calling `f` for the first time.
@@ -71,7 +71,7 @@ class Clock(object):
             f(function): The function to call repeatedly.
             msec(float): How long to wait between calls in milliseconds.
         """
-        call = task.LoopingCall(f)
+        call = task.LoopingCall(f, *args)
         call.clock = self._reactor
         d = call.start(msec / 1000.0, now=False)
         d.addErrback(log_failure, "Looping call died", consumeErrors=False)

From 244953be3f2532fbb57f5ecd09ed499bcb1e1c69 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 29 Jul 2019 10:03:14 +0200
Subject: [PATCH 54/72] Add kwargs and doc

---
 synapse/util/__init__.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py
index 9e0a47d206..7856353002 100644
--- a/synapse/util/__init__.py
+++ b/synapse/util/__init__.py
@@ -59,7 +59,7 @@ class Clock(object):
         """Returns the current system time in miliseconds since epoch."""
         return int(self.time() * 1000)
 
-    def looping_call(self, f, msec, *args):
+    def looping_call(self, f, msec, *args, **kwargs):
         """Call a function repeatedly.
 
         Waits `msec` initially before calling `f` for the first time.
@@ -70,8 +70,10 @@ class Clock(object):
         Args:
             f(function): The function to call repeatedly.
             msec(float): How long to wait between calls in milliseconds.
+            *args: Positional arguments to pass to the function.
+            **kwargs: Keyword arguments to pass to the function.
         """
-        call = task.LoopingCall(f, *args)
+        call = task.LoopingCall(f, *args, **kwargs)
         call.clock = self._reactor
         d = call.start(msec / 1000.0, now=False)
         d.addErrback(log_failure, "Looping call died", consumeErrors=False)

From bd083a5fcf8633c059625c31062ea0654e7890b3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 29 Jul 2019 10:04:09 +0200
Subject: [PATCH 55/72] Changelog

---
 changelog.d/5780.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5780.misc

diff --git a/changelog.d/5780.misc b/changelog.d/5780.misc
new file mode 100644
index 0000000000..b7eb56e625
--- /dev/null
+++ b/changelog.d/5780.misc
@@ -0,0 +1 @@
+Allow looping calls to be given arguments.
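For illustration, a hedged usage sketch of the extended `looping_call` signature added in patches 53 and 54; the callable and its arguments below are invented, but the forwarding matches Twisted's `task.LoopingCall(f, *args, **kwargs)`, which stores the arguments at construction time and passes them to `f` on every iteration:

```python
# Hypothetical caller: `clock` is a synapse.util.Clock, and prune_cache is an
# invented function. Every 60 seconds this calls
# prune_cache("device_lists", max_entries=1000).
clock.looping_call(prune_cache, 60 * 1000, "device_lists", max_entries=1000)

# Under the hood, this is the plain Twisted idiom:
from twisted.internet import task

call = task.LoopingCall(prune_cache, "device_lists", max_entries=1000)
call.start(60.0, now=False)  # interval in seconds; skip the immediate first call
```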
From 45df38e61bb876208d032cf3480230cecff6cdd8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 25 Jul 2019 17:15:54 +0100
Subject: [PATCH 56/72] Fix current_state bg update to work on old SQLite

---
 synapse/storage/roommember.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index cb88e49b51..a1b9fd8199 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -870,10 +870,10 @@ class RoomMemberStore(RoomMemberWorkerStore):
                 next_room, = row
 
                 sql = """
-                    UPDATE current_state_events AS c
+                    UPDATE current_state_events
                     SET membership = (
                         SELECT membership FROM room_memberships
-                        WHERE event_id = c.event_id
+                        WHERE event_id = current_state_events.event_id
                     )
                     WHERE room_id = ?
                 """

From 84c6ea1af8383049333213e108941a7831b2a4fd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 29 Jul 2019 13:04:50 +0100
Subject: [PATCH 57/72] Update old deps unit test to use old sqlite3

---
 .buildkite/pipeline.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index c8ae1a44be..b75269a155 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -49,14 +49,15 @@ steps:
 
   - command:
-      - "python -m pip install tox"
+      - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev"
+      - "python3.5 -m pip install tox"
       - "tox -e py35-old,codecov"
     label: ":python: 3.5 / SQLite / Old Deps"
     env:
       TRIAL_FLAGS: "-j 2"
     plugins:
       - docker#v3.0.1:
-          image: "python:3.5"
+          image: "ubuntu:xenial"  # We use xenial to get an old sqlite and python
           propagate-environment: true
     retry:
       automatic:

From d94916852fed15806202d58d903a8b43ff7b4367 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 25 Jul 2019 17:17:20 +0100
Subject: [PATCH 58/72] Newsfile

---
 changelog.d/5770.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5770.misc

diff --git a/changelog.d/5770.misc b/changelog.d/5770.misc
new file mode 100644
index 0000000000..5e15dfd5fa
--- /dev/null
+++ b/changelog.d/5770.misc
@@ -0,0 +1 @@
+Reduce database IO usage by optimising queries for current membership.

From 3b476f57679f21b1bff6c5c90f19e64eaca00fd7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 29 Jul 2019 15:33:32 +0100
Subject: [PATCH 59/72] Fix debian packages for sid being called buster. (#5775)

* Fix debian packages for sid being called buster.

I don't know why the sid image returns buster as its codename in
`lsb_release`, but it does, so let's just grab the codename from the
distro we pass into the Dockerfile.

* Newsfile
---
 changelog.d/5775.bugfix | 1 +
 docker/Dockerfile-dhvirtualenv | 5 +++++
 docker/build_debian.sh | 3 ++-
 3 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5775.bugfix

diff --git a/changelog.d/5775.bugfix b/changelog.d/5775.bugfix
new file mode 100644
index 0000000000..b124897d80
--- /dev/null
+++ b/changelog.d/5775.bugfix
@@ -0,0 +1 @@
+Fix debian packaging scripts to correctly build sid packages.
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 0117ab8bcc..ac9ebcfd88 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -42,6 +42,11 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b ### FROM ${distro} +# Get the distro we want to pull from as a dynamic build variable +# (We need to define it in each build stage) +ARG distro="" +ENV distro ${distro} + # Install the build dependencies # # NB: keep this list in sync with the list of build-deps in debian/control diff --git a/docker/build_debian.sh b/docker/build_debian.sh index 6ed2b39898..f312f0715f 100644 --- a/docker/build_debian.sh +++ b/docker/build_debian.sh @@ -4,7 +4,8 @@ set -ex -DIST=`lsb_release -c -s` +# Get the codename from distro env +DIST=`cut -d ':' -f2 <<< $distro` # we get a read-only copy of the source: make a writeable copy cp -aT /synapse/source /synapse/build From 105e7f6ed3a08bcbf0fac2c7749ccb29f39d1492 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 16:09:48 +0100 Subject: [PATCH 60/72] Remove lost comment --- synapse/storage/schema/delta/56/room_membership_idx.sql | 7 ------- 1 file changed, 7 deletions(-) diff --git a/synapse/storage/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/delta/56/room_membership_idx.sql index fc0b498843..92ab1f5e65 100644 --- a/synapse/storage/schema/delta/56/room_membership_idx.sql +++ b/synapse/storage/schema/delta/56/room_membership_idx.sql @@ -13,13 +13,6 @@ * limitations under the License. */ --- We add membership to current state so that we don't need to join against --- room_memberships, which can be surprisingly costly (we do such queries --- very frequently). --- This will be null for non-membership events and the content.membership key --- for membership events. (Will also be null for membership events until the --- background update job has finished). - -- Adds an index on room_memberships for fetching all forgotten rooms for a user INSERT INTO background_updates (update_name, progress_json) VALUES ('room_membership_forgotten_idx', '{}'); From 85b0bd8fe05ed78548c9b2b0da768927582f7d70 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 29 Jul 2019 16:34:44 +0100 Subject: [PATCH 61/72] Update the device list cache when keys/query is called (#5693) --- changelog.d/5693.bugfix | 1 + synapse/handlers/device.py | 150 ++++++++++++++++++----------------- synapse/handlers/e2e_keys.py | 60 +++++++++++++- 3 files changed, 137 insertions(+), 74 deletions(-) create mode 100644 changelog.d/5693.bugfix diff --git a/changelog.d/5693.bugfix b/changelog.d/5693.bugfix new file mode 100644 index 0000000000..d6f4e590ae --- /dev/null +++ b/changelog.d/5693.bugfix @@ -0,0 +1 @@ +Fix UISIs during homeserver outage. 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d6ab337783..5c1cf83c9d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -209,12 +209,12 @@ class DeviceHandler(DeviceWorkerHandler): self.federation_sender = hs.get_federation_sender() - self._edu_updater = DeviceListEduUpdater(hs, self) + self.device_list_updater = DeviceListUpdater(hs, self) federation_registry = hs.get_federation_registry() federation_registry.register_edu_handler( - "m.device_list_update", self._edu_updater.incoming_device_list_update + "m.device_list_update", self.device_list_updater.incoming_device_list_update ) federation_registry.register_query_handler( "user_devices", self.on_federation_query_user_devices @@ -426,7 +426,7 @@ def _update_device_from_client_ips(device, client_ips): device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) -class DeviceListEduUpdater(object): +class DeviceListUpdater(object): "Handles incoming device list updates from federation and updates the DB" def __init__(self, hs, device_handler): @@ -519,75 +519,7 @@ class DeviceListEduUpdater(object): logger.debug("Need to re-sync devices for %r? %r", user_id, resync) if resync: - # Fetch all devices for the user. - origin = get_domain_from_id(user_id) - try: - result = yield self.federation.query_user_devices(origin, user_id) - except ( - NotRetryingDestination, - RequestSendFailed, - HttpResponseException, - ): - # TODO: Remember that we are now out of sync and try again - # later - logger.warn("Failed to handle device list update for %s", user_id) - # We abort on exceptions rather than accepting the update - # as otherwise synapse will 'forget' that its device list - # is out of date. If we bail then we will retry the resync - # next time we get a device list update for this user_id. - # This makes it more likely that the device lists will - # eventually become consistent. - return - except FederationDeniedError as e: - logger.info(e) - return - except Exception: - # TODO: Remember that we are now out of sync and try again - # later - logger.exception( - "Failed to handle device list update for %s", user_id - ) - return - - stream_id = result["stream_id"] - devices = result["devices"] - - # If the remote server has more than ~1000 devices for this user - # we assume that something is going horribly wrong (e.g. a bot - # that logs in and creates a new device every time it tries to - # send a message). Maintaining lots of devices per user in the - # cache can cause serious performance issues as if this request - # takes more than 60s to complete, internal replication from the - # inbound federation worker to the synapse master may time out - # causing the inbound federation to fail and causing the remote - # server to retry, causing a DoS. So in this scenario we give - # up on storing the total list of devices and only handle the - # delta instead. - if len(devices) > 1000: - logger.warn( - "Ignoring device list snapshot for %s as it has >1K devs (%d)", - user_id, - len(devices), - ) - devices = [] - - for device in devices: - logger.debug( - "Handling resync update %r/%r, ID: %r", - user_id, - device["device_id"], - stream_id, - ) - - yield self.store.update_remote_device_list_cache( - user_id, devices, stream_id - ) - device_ids = [device["device_id"] for device in devices] - yield self.device_handler.notify_device_update(user_id, device_ids) - - # We clobber the seen updates since we've re-synced from a given - # point. 
-            self._seen_updates[user_id] = set([stream_id])
+            yield self.user_device_resync(user_id)
         else:
             # Simply update the single device, since we know that is the only
             # change (because of the single prev_id matching the current cache)
@@ -634,3 +566,77 @@ class DeviceListEduUpdater(object):
                 stream_id_in_updates.add(stream_id)
 
         return False
+
+    @defer.inlineCallbacks
+    def user_device_resync(self, user_id):
+        """Fetches all devices for a user and updates the device cache with them.
+
+        Args:
+            user_id (str): The ID of the user whose device list will be updated.
+        Returns:
+            Deferred[dict]: the response to the federation request documented
+            here, including its "devices" list:
+            https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
+        """
+        # Fetch all devices for the user.
+        origin = get_domain_from_id(user_id)
+        try:
+            result = yield self.federation.query_user_devices(origin, user_id)
+        except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
+            # TODO: Remember that we are now out of sync and try again
+            # later
+            logger.warn("Failed to handle device list update for %s", user_id)
+            # We abort on exceptions rather than accepting the update
+            # as otherwise synapse will 'forget' that its device list
+            # is out of date. If we bail then we will retry the resync
+            # next time we get a device list update for this user_id.
+            # This makes it more likely that the device lists will
+            # eventually become consistent.
+            return
+        except FederationDeniedError as e:
+            logger.info(e)
+            return
+        except Exception:
+            # TODO: Remember that we are now out of sync and try again
+            # later
+            logger.exception("Failed to handle device list update for %s", user_id)
+            return
+        stream_id = result["stream_id"]
+        devices = result["devices"]
+
+        # If the remote server has more than ~1000 devices for this user
+        # we assume that something is going horribly wrong (e.g. a bot
+        # that logs in and creates a new device every time it tries to
+        # send a message). Maintaining lots of devices per user in the
+        # cache can cause serious performance issues, because if this request
+        # takes more than 60s to complete, internal replication from the
+        # inbound federation worker to the synapse master may time out
+        # causing the inbound federation to fail and causing the remote
+        # server to retry, causing a DoS. So in this scenario we give
+        # up on storing the total list of devices and only handle the
+        # delta instead.
+        if len(devices) > 1000:
+            logger.warn(
+                "Ignoring device list snapshot for %s as it has >1K devs (%d)",
+                user_id,
+                len(devices),
+            )
+            devices = []
+
+        for device in devices:
+            logger.debug(
+                "Handling resync update %r/%r, ID: %r",
+                user_id,
+                device["device_id"],
+                stream_id,
+            )
+
+        yield self.store.update_remote_device_list_cache(user_id, devices, stream_id)
+        device_ids = [device["device_id"] for device in devices]
+        yield self.device_handler.notify_device_update(user_id, device_ids)
+
+        # We clobber the seen updates since we've re-synced from a given
+        # point.
+        self._seen_updates[user_id] = set([stream_id])
+
+        defer.returnValue(result)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 1300b540e3..366a0bc68b 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -65,6 +65,7 @@ class E2eKeysHandler(object):
                 }
             }
         """
+
         device_keys_query = query_body.get("device_keys", {})
 
         # separate users by domain.
@@ -121,7 +122,58 @@ class E2eKeysHandler(object):
         # Now fetch any devices that we don't have in our cache
         @defer.inlineCallbacks
         def do_remote_query(destination):
+            """This is called when we are querying the device list of a user
+            on a remote homeserver and their device list is not in the device
+            list cache. If we share a room with this user, and the query has
+            not been limited to specific devices, we will update the cache
+            with their device list."""
+            destination_query = remote_queries_not_in_cache[destination]
+
+            # We first consider whether we wish to update the device list cache with
+            # the user's device list. We want to track a user's devices when the
+            # authenticated user shares a room with the queried user and the query
+            # has not specified a particular device.
+            # If we update the cache for the queried user we remove them from further
+            # queries. We use the more efficient batched query_client_keys for all
+            # remaining users.
+            user_ids_updated = []
+            for (user_id, device_list) in destination_query.items():
+                if user_id in user_ids_updated:
+                    continue
+
+                if device_list:
+                    continue
+
+                room_ids = yield self.store.get_rooms_for_user(user_id)
+                if not room_ids:
+                    continue
+
+                # We've decided we're sharing a room with this user and should
+                # probably be tracking their device lists. However, we haven't
+                # done an initial sync on the device list so we do it now.
+                try:
+                    user_devices = yield self.device_handler.device_list_updater.user_device_resync(
+                        user_id
+                    )
+                    user_devices = user_devices["devices"]
+                    for device in user_devices:
+                        results[user_id] = {device["device_id"]: device["keys"]}
+                    user_ids_updated.append(user_id)
+                except Exception as e:
+                    failures[destination] = failures.get(destination, []).append(
+                        _exception_to_failure(e)
+                    )
+
+            if len(destination_query) == len(user_ids_updated):
+                # We've updated all the users in the query and we do not need to
+                # make any further remote calls.
+                return
+
+            # Remove all the users from the query which we have updated
+            for user_id in user_ids_updated:
+                destination_query.pop(user_id)
+
             try:
                 remote_result = yield self.federation.query_client_keys(
                     destination, {"device_keys": destination_query}, timeout=timeout
@@ -132,7 +184,8 @@ class E2eKeysHandler(object):
                         results[user_id] = keys
 
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -234,8 +287,10 @@ class E2eKeysHandler(object):
                 for user_id, keys in remote_result["one_time_keys"].items():
                     if user_id in device_keys:
                         json_result[user_id] = keys
+
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -263,6 +318,7 @@ class E2eKeysHandler(object):
 
     @defer.inlineCallbacks
     def upload_keys_for_user(self, user_id, device_id, keys):
+
         time_now = self.clock.time_msec()
 
         # TODO: Validate the JSON to make sure it has the right keys.
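For reference, a sketch of the dict that `user_device_resync` returns (it is the body of the federation `GET /_matrix/federation/v1/user/devices/{userId}` response that the resync proxies); all values below are invented examples:

```python
# Illustrative only -- the shape of the result consumed by do_remote_query above.
result = {
    "user_id": "@alice:remote.example",  # the queried user
    "stream_id": 12345,                  # remote server's device-list stream position
    "devices": [
        {
            "device_id": "JLAFKJWSCS",
            "keys": {},  # the device_keys object uploaded by this device (elided)
            "device_display_name": "Alice's phone",
        }
    ],
}
```

This is why `do_remote_query` can populate `results[user_id]` directly from the resync: each entry in `"devices"` carries both the device ID and its keys.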
From df3a5db629daa384cdf291f5ecbb0ff1721c80df Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 16:40:25 +0100 Subject: [PATCH 62/72] Expand comment --- synapse/storage/roommember.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index bfb834ccca..59ea7277fe 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -678,6 +678,11 @@ class RoomMemberWorkerStore(EventsWorkerStore): # to see if any have subsequently been updated. This is done so that # we can use a partial index on `forgotten = 1` on the assumption # that few users will actually forget many rooms. + # + # Note that a room is considered "forgotten" if *all* membership + # events for that user and room have the forgotten field set (as + # when a user forgets a room we update all rows for that user and + # room, not just the current one). sql = """ SELECT room_id, ( SELECT count(*) FROM room_memberships From 97a8b4caf7badb83c941c8afdb7ce237ee19cb7d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Jul 2019 02:02:18 +1000 Subject: [PATCH 63/72] Move some timeout checking logs to DEBUG #5785 --- changelog.d/5785.misc | 1 + synapse/handlers/presence.py | 2 +- synapse/handlers/typing.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5785.misc diff --git a/changelog.d/5785.misc b/changelog.d/5785.misc new file mode 100644 index 0000000000..0691222c42 --- /dev/null +++ b/changelog.d/5785.misc @@ -0,0 +1 @@ +Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ea54d0b991..94a9ca0357 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -333,7 +333,7 @@ class PresenceHandler(object): """Checks the presence of users that have timed out and updates as appropriate. """ - logger.info("Handling presence timeouts") + logger.debug("Handling presence timeouts") now = self.clock.time_msec() # Fetch the list of users that *may* have timed out. Things may have diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 6b661aa93d..f882330293 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -83,7 +83,7 @@ class TypingHandler(object): self._room_typing = {} def _handle_timeouts(self): - logger.info("Checking for typing timeouts") + logger.debug("Checking for typing timeouts") now = self.clock.time_msec() From 865077f1d1f4866ab874c56b70abbd426fedfb97 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Jul 2019 02:47:27 +1000 Subject: [PATCH 64/72] Room Complexity Client Implementation (#5783) --- changelog.d/5783.feature | 1 + docs/sample_config.yaml | 17 +++++ synapse/config/server.py | 41 ++++++++++++ synapse/federation/federation_client.py | 36 +++++++++++ synapse/federation/transport/client.py | 31 ++++++--- synapse/handlers/federation.py | 25 ++++++++ synapse/handlers/room_member.py | 84 +++++++++++++++++++++++-- tests/federation/test_complexity.py | 77 ++++++++++++++++++++++- 8 files changed, 298 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5783.feature diff --git a/changelog.d/5783.feature b/changelog.d/5783.feature new file mode 100644 index 0000000000..18f5a3cb28 --- /dev/null +++ b/changelog.d/5783.feature @@ -0,0 +1 @@ +Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. 
This option can be used to prevent adverse performance on resource-constrained homeservers. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7edf15207a..b92959692d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -278,6 +278,23 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context +# Resource-constrained Homeserver Settings +# +# If limit_remote_rooms.enabled is True, the room complexity will be +# checked before a user joins a new remote room. If it is above +# limit_remote_rooms.complexity, it will disallow joining or +# instantly leave. +# +# limit_remote_rooms.complexity_error can be set to customise the text +# displayed to the user when a room above the complexity threshold has +# its join cancelled. +# +# Uncomment the below lines to enable: +#limit_remote_rooms: +# enabled: True +# complexity: 1.0 +# complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. # diff --git a/synapse/config/server.py b/synapse/config/server.py index 00170f1393..15449695d1 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -18,6 +18,7 @@ import logging import os.path +import attr from netaddr import IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -38,6 +39,12 @@ DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] DEFAULT_ROOM_VERSION = "4" +ROOM_COMPLEXITY_TOO_GREAT = ( + "Your homeserver is unable to join rooms this large or complex. " + "Please speak to your server administrator, or upgrade your instance " + "to join this room." +) + class ServerConfig(Config): def read_config(self, config, **kwargs): @@ -247,6 +254,23 @@ class ServerConfig(Config): self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + @attr.s + class LimitRemoteRoomsConfig(object): + enabled = attr.ib( + validator=attr.validators.instance_of(bool), default=False + ) + complexity = attr.ib( + validator=attr.validators.instance_of((int, float)), default=1.0 + ) + complexity_error = attr.ib( + validator=attr.validators.instance_of(str), + default=ROOM_COMPLEXITY_TOO_GREAT, + ) + + self.limit_remote_rooms = LimitRemoteRoomsConfig( + **config.get("limit_remote_rooms", {}) + ) + bind_port = config.get("bind_port") if bind_port: if config.get("no_tls", False): @@ -617,6 +641,23 @@ class ServerConfig(Config): # Used by phonehome stats to group together related servers. #server_context: context + # Resource-constrained Homeserver Settings + # + # If limit_remote_rooms.enabled is True, the room complexity will be + # checked before a user joins a new remote room. If it is above + # limit_remote_rooms.complexity, it will disallow joining or + # instantly leave. + # + # limit_remote_rooms.complexity_error can be set to customise the text + # displayed to the user when a room above the complexity threshold has + # its join cancelled. + # + # Uncomment the below lines to enable: + #limit_remote_rooms: + # enabled: True + # complexity: 1.0 + # complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. 
# diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 25ed1257f1..6e03ce21af 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -993,3 +993,39 @@ class FederationClient(FederationBase): ) raise RuntimeError("Failed to send to any server.") + + @defer.inlineCallbacks + def get_room_complexity(self, destination, room_id): + """ + Fetch the complexity of a remote room from another server. + + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + try: + complexity = yield self.transport_layer.get_room_complexity( + destination=destination, room_id=room_id + ) + defer.returnValue(complexity) + except CodeMessageException as e: + # We didn't manage to get it -- probably a 404. We are okay if other + # servers don't give it to us. + logger.debug( + "Failed to fetch room complexity via %s for %s, got a %d", + destination, + room_id, + e.code, + ) + except Exception: + logger.exception( + "Failed to fetch room complexity via %s for %s", destination, room_id + ) + + # If we don't manage to find it, return None. It's not an error if a + # server doesn't give it to us. + defer.returnValue(None) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2a6709ff48..0cea0d2a10 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -21,7 +21,11 @@ from six.moves import urllib from twisted.internet import defer from synapse.api.constants import Membership -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.logging.utils import log_function logger = logging.getLogger(__name__) @@ -935,6 +939,23 @@ class TransportLayerClient(object): destination=destination, path=path, data=content, ignore_backoff=True ) + def get_room_complexity(self, destination, room_id): + """ + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + """ + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id) + + return self.client.get_json(destination=destination, path=path) + + +def _create_path(federation_prefix, path, *args): + """ + Ensures that all args are url encoded. 
+ """ + return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) + def _create_v1_path(path, *args): """Creates a path against V1 federation API from the path template and @@ -951,9 +972,7 @@ def _create_v1_path(path, *args): Returns: str """ - return FEDERATION_V1_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V1_PREFIX, path, *args) def _create_v2_path(path, *args): @@ -971,6 +990,4 @@ def _create_v2_path(path, *args): Returns: str """ - return FEDERATION_V2_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V2_PREFIX, path, *args) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 89b37dbc1c..10160bfe86 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2796,3 +2796,28 @@ class FederationHandler(BaseHandler): ) else: return user_joined_room(self.distributor, user, room_id) + + @defer.inlineCallbacks + def get_room_complexity(self, remote_room_hosts, room_id): + """ + Fetch the complexity of a remote room over federation. + + Args: + remote_room_hosts (list[str]): The remote servers to ask. + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + + for host in remote_room_hosts: + res = yield self.federation_client.get_room_complexity(host, room_id) + + # We got a result, return it. + if res: + defer.returnValue(res) + + # We fell off the bottom, couldn't get the complexity from anyone. Oh + # well. + defer.returnValue(None) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index baea08ddd0..249a6d9c5d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,8 +26,7 @@ from unpaddedbase64 import decode_base64 from twisted.internet import defer -import synapse.server -import synapse.types +from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.types import RoomID, UserID @@ -543,7 +542,7 @@ class RoomMemberHandler(object): ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: - requester = synapse.types.create_requester(target_user) + requester = types.create_requester(target_user) prev_event = yield self.event_creation_handler.deduplicate_state_event( event, context @@ -945,6 +944,47 @@ class RoomMemberMasterHandler(RoomMemberHandler): self.distributor.declare("user_joined_room") self.distributor.declare("user_left_room") + @defer.inlineCallbacks + def _is_remote_room_too_complex(self, room_id, remote_room_hosts): + """ + Check if complexity of a remote room is too great. + + Args: + room_id (str) + remote_room_hosts (list[str]) + + Returns: bool of whether the complexity is too great, or None + if unable to be fetched + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.federation_handler.get_room_complexity( + remote_room_hosts, room_id + ) + + if complexity: + if complexity["v1"] > max_complexity: + return True + return False + return None + + @defer.inlineCallbacks + def _is_local_room_too_complex(self, room_id): + """ + Check if the complexity of a local room is too great. 
+ + Args: + room_id (str) + + Returns: bool + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.store.get_room_complexity(room_id) + + if complexity["v1"] > max_complexity: + return True + + return False + @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): """Implements RoomMemberHandler._remote_join @@ -952,7 +992,6 @@ class RoomMemberMasterHandler(RoomMemberHandler): # filter ourselves out of remote_room_hosts: do_invite_join ignores it # and if it is the only entry we'd like to return a 404 rather than a # 500. - remote_room_hosts = [ host for host in remote_room_hosts if host != self.hs.hostname ] @@ -960,6 +999,18 @@ class RoomMemberMasterHandler(RoomMemberHandler): if len(remote_room_hosts) == 0: raise SynapseError(404, "No known servers") + if self.hs.config.limit_remote_rooms.enabled: + # Fetch the room complexity + too_complex = yield self._is_remote_room_too_complex( + room_id, remote_room_hosts + ) + if too_complex is True: + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + # We don't do an auth check if we are doing an invite # join dance for now, since we're kinda implicitly checking # that we are allowed to join when we decide whether or not we @@ -969,6 +1020,31 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) yield self._user_joined_room(user, room_id) + # Check the room we just joined wasn't too large, if we didn't fetch the + # complexity of it before. + if self.hs.config.limit_remote_rooms.enabled: + if too_complex is False: + # We checked, and we're under the limit. + return + + # Check again, but with the local state events + too_complex = yield self._is_local_room_too_complex(room_id) + + if too_complex is False: + # We're under the limit. + return + + # The room is too large. Leave. + requester = types.create_requester(user, None, False, None) + yield self.update_membership( + requester=requester, target=user, room_id=room_id, action="leave" + ) + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + @defer.inlineCallbacks def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index a5b03005d7..51714a2b06 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from mock import Mock + from twisted.internet import defer +from synapse.api.errors import Codes, SynapseError from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.federation.transport import server from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import UserID from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest @@ -33,9 +37,8 @@ class RoomComplexityTests(unittest.HomeserverTestCase): ] def default_config(self, name="test"): - config = super(RoomComplexityTests, self).default_config(name=name) - config["limit_large_remote_room_joins"] = True - config["limit_large_remote_room_complexity"] = 0.05 + config = super().default_config(name=name) + config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config def prepare(self, reactor, clock, homeserver): @@ -88,3 +91,71 @@ class RoomComplexityTests(unittest.HomeserverTestCase): self.assertEquals(200, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) + + def test_join_too_large(self): + + u1 = self.register_user("u1", "pass") + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed({"v1": 9999})) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + d = handler._remote_join( + None, + ["otherserver.example"], + "roomid", + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. + f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400, f.value) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + + def test_join_too_large_once_joined(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + # Ok, this might seem a bit weird -- I want to test that we actually + # leave the room, but I don't want to simulate two servers. So, we make + # a local room, which we say we're joining remotely, even if there's no + # remote, because we mock that out. Then, we'll leave the (actually + # local) room, which will be propagated over federation in a real + # scenario. + room_1 = self.helper.create_room_as(u1, tok=u1_token) + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed(None)) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + # Artificially raise the complexity + self.hs.get_datastore().get_current_state_event_counts = lambda x: defer.succeed( + 600 + ) + + d = handler._remote_join( + None, + ["otherserver.example"], + room_1, + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. 
+ f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) From 8c97f6414cf322fc5b42a92ed0df2fb70bfab3fc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 30 Jul 2019 08:25:02 +0100 Subject: [PATCH 65/72] Remove non-functional 'expire_access_token' setting (#5782) The `expire_access_token` didn't do what it sounded like it should do. What it actually did was make Synapse enforce the 'time' caveat on macaroons used as access tokens, but since our access token macaroons never contained such a caveat, it was always a no-op. (The code to add 'time' caveats was removed back in v0.18.5, in #1656) --- changelog.d/5782.removal | 1 + docs/sample_config.yaml | 4 --- synapse/api/auth.py | 28 ++++--------------- synapse/config/key.py | 6 ---- synapse/handlers/auth.py | 2 +- tests/handlers/test_register.py | 2 +- .../test_resource_limits_server_notices.py | 2 +- tests/utils.py | 1 - 8 files changed, 9 insertions(+), 37 deletions(-) create mode 100644 changelog.d/5782.removal diff --git a/changelog.d/5782.removal b/changelog.d/5782.removal new file mode 100644 index 0000000000..658bf923ab --- /dev/null +++ b/changelog.d/5782.removal @@ -0,0 +1 @@ +Remove non-functional 'expire_access_token' setting. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b92959692d..08316597fa 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -942,10 +942,6 @@ uploads_path: "DATADIR/uploads" # # macaroon_secret_key: -# Used to enable access token expiration. -# -#expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 351790cca4..179644852a 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -410,21 +410,16 @@ class Auth(object): try: user_id = self.get_user_id_from_macaroon(macaroon) - has_expiry = False guest = False for caveat in macaroon.caveats: - if caveat.caveat_id.startswith("time "): - has_expiry = True - elif caveat.caveat_id == "guest = true": + if caveat.caveat_id == "guest = true": guest = True - self.validate_macaroon( - macaroon, rights, self.hs.config.expire_access_token, user_id=user_id - ) + self.validate_macaroon(macaroon, rights, user_id=user_id) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): raise InvalidClientTokenError("Invalid macaroon passed.") - if not has_expiry and rights == "access": + if rights == "access": self.token_cache[token] = (user_id, guest) return user_id, guest @@ -450,7 +445,7 @@ class Auth(object): return caveat.caveat_id[len(user_prefix) :] raise InvalidClientTokenError("No user caveat in macaroon") - def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): + def validate_macaroon(self, macaroon, type_string, user_id): """ validate that a Macaroon is understood by and was signed by this server. @@ -458,7 +453,6 @@ class Auth(object): macaroon(pymacaroons.Macaroon): The macaroon to validate type_string(str): The kind of token required (e.g. "access", "delete_pusher") - verify_expiry(bool): Whether to verify whether the macaroon has expired. 
user_id (str): The user_id required """ v = pymacaroons.Verifier() @@ -471,19 +465,7 @@ class Auth(object): v.satisfy_exact("type = " + type_string) v.satisfy_exact("user_id = %s" % user_id) v.satisfy_exact("guest = true") - - # verify_expiry should really always be True, but there exist access - # tokens in the wild which expire when they should not, so we can't - # enforce expiry yet (so we have to allow any caveat starting with - # 'time < ' in access tokens). - # - # On the other hand, short-term login tokens (as used by CAS login, for - # example) have an expiry time which we do want to enforce. - - if verify_expiry: - v.satisfy_general(self._verify_expiry) - else: - v.satisfy_general(lambda c: c.startswith("time < ")) + v.satisfy_general(self._verify_expiry) # access_tokens include a nonce for uniqueness: any value is acceptable v.satisfy_general(lambda c: c.startswith("nonce = ")) diff --git a/synapse/config/key.py b/synapse/config/key.py index 8fc74f9cdf..fe8386985c 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -116,8 +116,6 @@ class KeyConfig(Config): seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() - self.expire_access_token = config.get("expire_access_token", False) - # a secret which is used to calculate HMACs for form values, to stop # falsification of values self.form_secret = config.get("form_secret", None) @@ -144,10 +142,6 @@ class KeyConfig(Config): # %(macaroon_secret_key)s - # Used to enable access token expiration. - # - #expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 05be5b7c48..0f3ebf7ef8 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -860,7 +860,7 @@ class AuthHandler(BaseHandler): try: macaroon = pymacaroons.Macaroon.deserialize(login_token) user_id = auth_api.get_user_id_from_macaroon(macaroon) - auth_api.validate_macaroon(macaroon, "login", True, user_id) + auth_api.validate_macaroon(macaroon, "login", user_id) except Exception: raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 99dce45cfe..0ad0a88165 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -44,7 +44,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): hs_config["max_mau_value"] = 50 hs_config["limit_usage_by_mau"] = True - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 984feb623f..cdf89e3383 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -36,7 +36,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): "room_name": "Server Notices", } - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/utils.py b/tests/utils.py index 6350646263..f1eb9a545c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -126,7 +126,6 @@ def 
default_config(name, parse=False):
         "enable_registration": True,
         "enable_registration_captcha": False,
         "macaroon_secret_key": "not even a little secret",
-        "expire_access_token": False,
         "trusted_third_party_id_servers": [],
         "room_invite_state_types": [],
         "password_providers": [],

From 458e51df7aabe6fc2736c1aeb6a3556374309879 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:07:02 +0100
Subject: [PATCH 66/72] Fix error handling when fetching remote device keys

---
 synapse/handlers/e2e_keys.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 366a0bc68b..848cd3a0d5 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -161,9 +161,7 @@ class E2eKeysHandler(object):
                         results[user_id] = {device["device_id"]: device["keys"]}
                     user_ids_updated.append(user_id)
                 except Exception as e:
-                    failures[destination] = failures.get(destination, []).append(
-                        _exception_to_failure(e)
-                    )
+                    failures[destination] = _exception_to_failure(e)
 
             if len(destination_query) == len(user_ids_updated):
                 # We've updated all the users in the query and we do not need to

From 1ec7d656dd57bce3c43994cc53727639ea05593e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:09:02 +0100
Subject: [PATCH 67/72] Unwrap error

---
 synapse/handlers/e2e_keys.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 848cd3a0d5..1f90b0d278 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -25,6 +25,7 @@ from twisted.internet import defer
 from synapse.api.errors import CodeMessageException, SynapseError
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import UserID, get_domain_from_id
+from synapse.util import unwrapFirstError
 from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
@@ -192,7 +193,7 @@ class E2eKeysHandler(object):
                     for destination in remote_queries_not_in_cache
                 ],
                 consumeErrors=True,
-            )
+            ).addErrback(unwrapFirstError)
         )
 
         return {"device_keys": results, "failures": failures}

From e23ab7f41a2ba0e3e45a70c3a1915f9fd78c15ba Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:10:00 +0100
Subject: [PATCH 68/72] Newsfile

---
 changelog.d/5789.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5789.bugfix

diff --git a/changelog.d/5789.bugfix b/changelog.d/5789.bugfix
new file mode 100644
index 0000000000..d6f4e590ae
--- /dev/null
+++ b/changelog.d/5789.bugfix
@@ -0,0 +1 @@
+Fix UISIs during homeserver outage.

From 15056ca2086f3165a74cad65d35b2b742caf4fee Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 14:51:41 +0100
Subject: [PATCH 69/72] Fix current_state_events membership background update.

It turns out that not all rooms are in `rooms`, so let's fetch the room
list from `current_state_events`.

We move the delta file to force it to be run again.
---
 synapse/storage/roommember.py                                   | 2 +-
 ...s_membership.sql => current_state_events_membership_mk2.sql} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename synapse/storage/schema/delta/56/{current_state_events_membership.sql => current_state_events_membership_mk2.sql} (100%)

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index e60409ed73..eecb276465 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -935,7 +935,7 @@ class RoomMemberStore(RoomMemberWorkerStore):
             while processed < batch_size:
                 txn.execute(
                     """
-                        SELECT MIN(room_id) FROM rooms WHERE room_id > ?
+                        SELECT MIN(room_id) FROM current_state_events WHERE room_id > ?
                     """,
                     (last_processed_room,),
                 )
diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
similarity index 100%
rename from synapse/storage/schema/delta/56/current_state_events_membership.sql
rename to synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql

From 958d69f30066994fbd22f404c4260c63318b8c15 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 14:53:52 +0100
Subject: [PATCH 70/72] Newsfile

---
 changelog.d/5792.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5792.misc

diff --git a/changelog.d/5792.misc b/changelog.d/5792.misc
new file mode 100644
index 0000000000..5e15dfd5fa
--- /dev/null
+++ b/changelog.d/5792.misc
@@ -0,0 +1 @@
+Reduce database IO usage by optimising queries for current membership.

From 123c04daa7e729ce22c8771d1aa3d79a1a880e29 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 15:29:26 +0100
Subject: [PATCH 71/72] Don't recreate column

---
 .../56/current_state_events_membership.sql    | 22 +++++++++++++++++++
 .../current_state_events_membership_mk2.sql   |  1 -
 2 files changed, 22 insertions(+), 1 deletion(-)
 create mode 100644 synapse/storage/schema/delta/56/current_state_events_membership.sql

diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql
new file mode 100644
index 0000000000..473018676f
--- /dev/null
+++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql
@@ -0,0 +1,22 @@
+/* Copyright 2019 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- We add membership to current state so that we don't need to join against
+-- room_memberships, which can be surprisingly costly (we do such queries
+-- very frequently).
+-- This will be null for non-membership events and the content.membership key
+-- for membership events. (Will also be null for membership events until the
+-- background update job has finished).
+ALTER TABLE current_state_events ADD membership TEXT; diff --git a/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql index b2e08cd85d..3133d42d4a 100644 --- a/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql +++ b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql @@ -19,7 +19,6 @@ -- This will be null for non-membership events and the content.membership key -- for membership events. (Will also be null for membership events until the -- background update job has finished). -ALTER TABLE current_state_events ADD membership TEXT; INSERT INTO background_updates (update_name, progress_json) VALUES ('current_state_events_membership', '{}'); From 4037d3220aa265d0888527f05329084eaa4dbe71 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 16:43:59 +0100 Subject: [PATCH 72/72] Newsfile --- changelog.d/5793.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5793.misc diff --git a/changelog.d/5793.misc b/changelog.d/5793.misc new file mode 100644 index 0000000000..5e15dfd5fa --- /dev/null +++ b/changelog.d/5793.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership.
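
The sketches below are editorial illustrations of the patches above. They are not part of the series, and the example values and helper names in them are assumptions made for illustration.

On the expire_access_token removal: validate_macaroon now always enforces the expiry caveat through _verify_expiry, where previously a False verify_expiry accepted any caveat beginning with "time < ". A rough pymacaroons sketch of such a caveat scheme (the caveat strings and the millisecond clock are illustrative, not Synapse's exact ones):

    import time

    import pymacaroons

    key = "macaroon-secret-key"
    m = pymacaroons.Macaroon(location="example.com", identifier="key", key=key)
    m.add_first_party_caveat("type = login")
    m.add_first_party_caveat("user_id = @alice:example.com")
    # expire one minute from now, expressed in milliseconds
    m.add_first_party_caveat("time < %d" % (int(time.time() * 1000) + 60000,))

    v = pymacaroons.Verifier()
    v.satisfy_exact("type = login")
    v.satisfy_exact("user_id = @alice:example.com")

    def verify_expiry(caveat):
        # enforce the expiry caveat rather than accepting any "time < ..." string
        if not caveat.startswith("time < "):
            return False
        return int(time.time() * 1000) < int(caveat[len("time < "):])

    v.satisfy_general(verify_expiry)
    v.verify(m, key)  # raises if any caveat is not satisfied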
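
On PATCH 66/72: the replaced line tripped over the fact that list.append() mutates in place and returns None, so failures.get(destination, []).append(...) stored None and discarded the fresh list. A minimal reproduction, with a plain string standing in for whatever _exception_to_failure returns:

    failures = {}
    err = "connection refused"

    # buggy pattern: append() returns None, and None is what gets stored
    failures["remote.example.com"] = failures.get("remote.example.com", []).append(err)
    assert failures["remote.example.com"] is None

    # the fix stores a single failure per destination instead
    failures["remote.example.com"] = err
    assert failures["remote.example.com"] == err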
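
On PATCH 67/72: defer.gatherResults(..., consumeErrors=True) reports whichever failure fired first wrapped in a defer.FirstError, so without unwrapping, callers would see FirstError rather than the underlying exception. A sketch of an unwrap errback, assumed to behave like synapse.util.unwrapFirstError:

    from twisted.internet import defer

    def unwrap_first_error(failure):
        # gatherResults wraps the first sub-failure in a FirstError; trap it
        # and hand the original failure on to the next errback in the chain
        failure.trap(defer.FirstError)
        return failure.value.subFailure

    d = defer.gatherResults(
        [defer.fail(ValueError("boom")), defer.succeed(42)],
        consumeErrors=True,
    ).addErrback(unwrap_first_error)

    d.addErrback(lambda f: print(f.value))  # prints the original ValueError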
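
On PATCH 69/72: the background update walks rooms in batches by keyset pagination, repeatedly asking for the smallest room_id strictly greater than the last one handled; the fix only changes which table that query reads, since a room can have rows in current_state_events without appearing in rooms. A self-contained sketch of the loop over toy data (table trimmed to the one relevant column):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE current_state_events (room_id TEXT)")
    conn.executemany(
        "INSERT INTO current_state_events (room_id) VALUES (?)",
        [("!a:hs",), ("!a:hs",), ("!b:hs",), ("!c:hs",)],
    )

    last_processed_room = ""  # sorts before every real room_id
    while True:
        # keyset pagination: fetch the smallest id after the last one handled
        (room_id,) = conn.execute(
            "SELECT MIN(room_id) FROM current_state_events WHERE room_id > ?",
            (last_processed_room,),
        ).fetchone()
        if room_id is None:
            break  # nothing left to process
        print("processing", room_id)  # each distinct room, once, in order
        last_processed_room = room_id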
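
On PATCH 71/72: delta files run once per file name, so the rename to _mk2 makes the schema runner execute that file afresh; but a database which had already run the original delta would then hit the ALTER TABLE a second time, and adding an existing column is an error rather than a no-op. Hence the split: the ALTER stays under the original, already-run name, and the mk2 file only re-queues the background update. A sketch of the failure the split avoids:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE current_state_events (event_id TEXT)")
    conn.execute("ALTER TABLE current_state_events ADD membership TEXT")

    try:
        # what a re-run of the renamed delta would have attempted
        conn.execute("ALTER TABLE current_state_events ADD membership TEXT")
    except sqlite3.OperationalError as e:
        print(e)  # duplicate column name: membership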