From a3f124b821f0faf53af9e6c890870ec8cbb47ce5 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 16 Sep 2020 21:15:55 +0200 Subject: [PATCH 001/134] Switch metaclass initialization to python 3-compatible syntax (#8326) --- changelog.d/8326.misc | 1 + synapse/handlers/room_member.py | 4 +--- synapse/replication/http/_base.py | 4 +--- synapse/storage/databases/main/account_data.py | 8 +++----- synapse/storage/databases/main/push_rule.py | 7 +++---- synapse/storage/databases/main/receipts.py | 8 +++----- synapse/storage/databases/main/stream.py | 4 +--- synapse/types.py | 6 +++--- 8 files changed, 16 insertions(+), 26 deletions(-) create mode 100644 changelog.d/8326.misc diff --git a/changelog.d/8326.misc b/changelog.d/8326.misc new file mode 100644 index 0000000000..985d2c027a --- /dev/null +++ b/changelog.d/8326.misc @@ -0,0 +1 @@ +Update outdated usages of `metaclass` to python 3 syntax. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 01a6e88262..8feba8c90a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -51,14 +51,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) -class RoomMemberHandler: +class RoomMemberHandler(metaclass=abc.ABCMeta): # TODO(paul): This handler currently contains a messy conflation of # low-level API that works on UserID objects and so on, and REST-level # API that takes ID strings and returns pagination chunks. These concerns # ought to be separated out a lot better. - __metaclass__ = abc.ABCMeta - def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastore() diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index ba16f22c91..b448da6710 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -33,7 +33,7 @@ from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) -class ReplicationEndpoint: +class ReplicationEndpoint(metaclass=abc.ABCMeta): """Helper base class for defining new replication HTTP endpoints. This creates an endpoint under `/_synapse/replication/:NAME/:PATH_ARGS..` @@ -72,8 +72,6 @@ class ReplicationEndpoint: is received. """ - __metaclass__ = abc.ABCMeta - NAME = abc.abstractproperty() # type: str # type: ignore PATH_ARGS = abc.abstractproperty() # type: Tuple[str, ...] # type: ignore METHOD = "POST" diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 4436b1a83d..5f1a2b9aa6 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -29,15 +29,13 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache logger = logging.getLogger(__name__) -class AccountDataWorkerStore(SQLBaseStore): +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. +class AccountDataWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_max_account_data_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. 
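A note on the pattern this first patch migrates: under Python 3 a `__metaclass__` class attribute is simply ignored, so the `abc.ABCMeta` enforcement these classes relied on was silently lost. A minimal, self-contained sketch of the before and after (illustrative class names, not Synapse code):

```python
import abc


class OldStyleStore:
    # Python 2 read this attribute to pick the metaclass; Python 3
    # treats it as an ordinary class attribute and ignores it.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_max_stream_id(self):
        ...


class NewStyleStore(metaclass=abc.ABCMeta):
    # The keyword form is honoured on Python 3, so abstract methods
    # are actually enforced again.
    @abc.abstractmethod
    def get_max_stream_id(self):
        ...


OldStyleStore()  # succeeds on Python 3 -- the enforcement this patch restores
try:
    NewStyleStore()
except TypeError as exc:
    print(exc)  # can't instantiate abstract class NewStyleStore
```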
- __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): account_max = self.get_max_account_data_stream_id() self._account_data_stream_cache = StreamChangeCache( diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9790a31998..b7a8d34ce1 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -61,6 +61,8 @@ def _load_rules(rawrules, enabled_map, use_new_defaults=False): return rules +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. class PushRulesWorkerStore( ApplicationServiceWorkerStore, ReceiptsWorkerStore, @@ -68,15 +70,12 @@ class PushRulesWorkerStore( RoomMemberWorkerStore, EventsWorkerStore, SQLBaseStore, + metaclass=abc.ABCMeta, ): """This is an abstract base class where subclasses must implement `get_max_push_rules_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): super(PushRulesWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 4a0d5a320e..6568bddd81 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -31,15 +31,13 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache logger = logging.getLogger(__name__) -class ReceiptsWorkerStore(SQLBaseStore): +# The ABCMeta metaclass ensures that it cannot be instantiated without +# the abstract methods being implemented. +class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_max_receipt_stream_id` which can be called in the initializer. """ - # This ABCMeta metaclass ensures that we cannot be instantiated without - # the abstract methods being implemented. - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs): super(ReceiptsWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 2e95518752..7dbe11513b 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -259,14 +259,12 @@ def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: return " AND ".join(clauses), args -class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): +class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): """This is an abstract base class where subclasses must implement `get_room_max_stream_ordering` and `get_room_min_stream_ordering` which can be called in the initializer. 
""" - __metaclass__ = abc.ABCMeta - def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): super(StreamWorkerStore, self).__init__(database, db_conn, hs) diff --git a/synapse/types.py b/synapse/types.py index dc09448bdc..a6fc7df22c 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -165,7 +165,9 @@ def get_localpart_from_id(string): DS = TypeVar("DS", bound="DomainSpecificString") -class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "domain"))): +class DomainSpecificString( + namedtuple("DomainSpecificString", ("localpart", "domain")), metaclass=abc.ABCMeta +): """Common base class among ID/name strings that have a local part and a domain name, prefixed with a sigil. @@ -175,8 +177,6 @@ class DomainSpecificString(namedtuple("DomainSpecificString", ("localpart", "dom 'domain' : The domain part of the name """ - __metaclass__ = abc.ABCMeta - SIGIL = abc.abstractproperty() # type: str # type: ignore # Deny iteration because it will bite you if you try to create a singleton From 53284c425e219fbd9ae445bbe4a8628883a3631d Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 12:54:56 +0200 Subject: [PATCH 002/134] Fix a potential bug of UnboundLocalError (#8329) Replaced with less buggier control flow --- changelog.d/8329.bugfix | 1 + synapse/rest/client/v2_alpha/register.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8329.bugfix diff --git a/changelog.d/8329.bugfix b/changelog.d/8329.bugfix new file mode 100644 index 0000000000..2f71f1f4b9 --- /dev/null +++ b/changelog.d/8329.bugfix @@ -0,0 +1 @@ +Fix UnboundLocalError from occuring when appservices send malformed register request. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b6b90a8b30..0705718d00 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -431,11 +431,14 @@ class RegisterRestServlet(RestServlet): access_token = self.auth.get_access_token_from_request(request) - if isinstance(desired_username, str): - result = await self._do_appservice_registration( - desired_username, access_token, body - ) - return 200, result # we throw for non 200 responses + if not isinstance(desired_username, str): + raise SynapseError(400, "Desired Username is missing or not a string") + + result = await self._do_appservice_registration( + desired_username, access_token, body + ) + + return 200, result # == Normal User Registration == (everyone else) if not self._registration_enabled: From c3c9732c5363ef007dd838dea016719d3ab07a89 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 17 Sep 2020 07:04:15 -0400 Subject: [PATCH 003/134] Use admin_patterns for all admin APIs. (#8331) This reduces duplication of the admin prefix in regular expressions. --- changelog.d/8331.misc | 1 + synapse/rest/admin/__init__.py | 4 ++-- synapse/rest/admin/_base.py | 4 ++-- synapse/rest/admin/devices.py | 15 +++++---------- synapse/rest/admin/purge_room_servlet.py | 5 ++--- synapse/rest/admin/server_notice_servlet.py | 9 ++++----- synapse/rest/admin/users.py | 8 ++++---- 7 files changed, 20 insertions(+), 26 deletions(-) create mode 100644 changelog.d/8331.misc diff --git a/changelog.d/8331.misc b/changelog.d/8331.misc new file mode 100644 index 0000000000..0e1bae20ef --- /dev/null +++ b/changelog.d/8331.misc @@ -0,0 +1 @@ +Use the `admin_patterns` helper in additional locations. 
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 1c88c93f38..abf362c7b7 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -16,13 +16,13 @@ import logging import platform -import re import synapse from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import JsonResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.rest.admin._base import ( + admin_patterns, assert_requester_is_admin, historical_admin_path_patterns, ) @@ -61,7 +61,7 @@ logger = logging.getLogger(__name__) class VersionServlet(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"),) + PATTERNS = admin_patterns("/server_version$") def __init__(self, hs): self.res = { diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index d82eaf5e38..db9fea263a 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -44,7 +44,7 @@ def historical_admin_path_patterns(path_regex): ] -def admin_patterns(path_regex: str): +def admin_patterns(path_regex: str, version: str = "v1"): """Returns the list of patterns for an admin endpoint Args: @@ -54,7 +54,7 @@ def admin_patterns(path_regex: str): Returns: A list of regex patterns. """ - admin_prefix = "^/_synapse/admin/v1" + admin_prefix = "^/_synapse/admin/" + version patterns = [re.compile(admin_prefix + path_regex)] return patterns diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 8d32677339..4670d7160d 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re from synapse.api.errors import NotFoundError, SynapseError from synapse.http.servlet import ( @@ -21,7 +20,7 @@ from synapse.http.servlet import ( assert_params_in_dict, parse_json_object_from_request, ) -from synapse.rest.admin._base import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.types import UserID logger = logging.getLogger(__name__) @@ -32,10 +31,8 @@ class DeviceRestServlet(RestServlet): Get, update or delete the given user's device """ - PATTERNS = ( - re.compile( - "^/_synapse/admin/v2/users/(?P[^/]*)/devices/(?P[^/]*)$" - ), + PATTERNS = admin_patterns( + "/users/(?P[^/]*)/devices/(?P[^/]*)$", "v2" ) def __init__(self, hs): @@ -98,7 +95,7 @@ class DevicesRestServlet(RestServlet): Retrieve the given user's devices """ - PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P[^/]*)/devices$"),) + PATTERNS = admin_patterns("/users/(?P[^/]*)/devices$", "v2") def __init__(self, hs): """ @@ -131,9 +128,7 @@ class DeleteDevicesRestServlet(RestServlet): key which lists the device_ids to delete. """ - PATTERNS = ( - re.compile("^/_synapse/admin/v2/users/(?P[^/]*)/delete_devices$"), - ) + PATTERNS = admin_patterns("/users/(?P[^/]*)/delete_devices$", "v2") def __init__(self, hs): self.hs = hs diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py index f474066542..8b7bb6d44e 100644 --- a/synapse/rest/admin/purge_room_servlet.py +++ b/synapse/rest/admin/purge_room_servlet.py @@ -12,14 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
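To make the `admin_patterns` change above concrete: the new `version` argument only swaps the prefix segment, which is what lets the v2 endpoints in this patch drop their hand-written regexes. A quick standalone sketch of what the helper expands to (re-implemented here for illustration):

```python
import re


def admin_patterns(path_regex: str, version: str = "v1"):
    # Standalone copy of the helper above, for illustration.
    return [re.compile("^/_synapse/admin/" + version + path_regex)]


# The default keeps the old v1 behaviour:
assert admin_patterns("/server_version$")[0].pattern == (
    "^/_synapse/admin/v1/server_version$"
)

# Passing "v2" reproduces the previously hand-written v2 patterns:
pattern = admin_patterns("/users/(?P<user_id>[^/]*)/devices$", "v2")[0]
assert pattern.match("/_synapse/admin/v2/users/@alice:example.com/devices")
```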
-import re - from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, ) from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns class PurgeRoomServlet(RestServlet): @@ -35,7 +34,7 @@ class PurgeRoomServlet(RestServlet): {} """ - PATTERNS = (re.compile("^/_synapse/admin/v1/purge_room$"),) + PATTERNS = admin_patterns("/purge_room$") def __init__(self, hs): """ diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 6e9a874121..375d055445 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -12,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import re - from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.http.servlet import ( @@ -22,6 +20,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.admin._base import admin_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import UserID @@ -56,13 +55,13 @@ class SendServerNoticeServlet(RestServlet): self.snm = hs.get_server_notices_manager() def register(self, json_resource): - PATTERN = "^/_synapse/admin/v1/send_server_notice" + PATTERN = "/send_server_notice" json_resource.register_paths( - "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__ + "POST", admin_patterns(PATTERN + "$"), self.on_POST, self.__class__.__name__ ) json_resource.register_paths( "PUT", - (re.compile(PATTERN + "/(?P[^/]*)$"),), + admin_patterns(PATTERN + "/(?P[^/]*)$"), self.on_PUT, self.__class__.__name__, ) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index f3e77da850..0f537031c4 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -15,7 +15,6 @@ import hashlib import hmac import logging -import re from http import HTTPStatus from synapse.api.constants import UserTypes @@ -29,6 +28,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.rest.admin._base import ( + admin_patterns, assert_requester_is_admin, assert_user_is_admin, historical_admin_path_patterns, @@ -60,7 +60,7 @@ class UsersRestServlet(RestServlet): class UsersRestServletV2(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v2/users$"),) + PATTERNS = admin_patterns("/users$", "v2") """Get request to list all local users. This needs user to have administrator access in Synapse. @@ -105,7 +105,7 @@ class UsersRestServletV2(RestServlet): class UserRestServletV2(RestServlet): - PATTERNS = (re.compile("^/_synapse/admin/v2/users/(?P[^/]+)$"),) + PATTERNS = admin_patterns("/users/(?P[^/]+)$", "v2") """Get request to list user details. This needs user to have administrator access in Synapse. 
@@ -642,7 +642,7 @@ class UserAdminServlet(RestServlet): {} """ - PATTERNS = (re.compile("^/_synapse/admin/v1/users/(?P[^/]*)/admin$"),) + PATTERNS = admin_patterns("/users/(?P[^/]*)/admin$") def __init__(self, hs): self.hs = hs From 837293c314b47e988fe9532115476a6536cd6406 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 14:37:01 +0200 Subject: [PATCH 004/134] Remove obsolete __future__ imports (#8337) --- changelog.d/8337.misc | 1 + contrib/cmdclient/console.py | 2 -- contrib/cmdclient/http.py | 2 -- contrib/graph/graph.py | 2 -- contrib/graph/graph3.py | 2 -- contrib/jitsimeetbridge/jitsimeetbridge.py | 2 -- contrib/scripts/kick_users.py | 8 +------- scripts-dev/definitions.py | 2 -- scripts-dev/dump_macaroon.py | 2 -- scripts-dev/federation_client.py | 2 -- scripts-dev/hash_history.py | 2 -- scripts/move_remote_media_to_new_store.py | 2 -- scripts/register_new_matrix_user | 2 -- synapse/_scripts/register_new_matrix_user.py | 2 -- synapse/app/homeserver.py | 2 -- synapse/config/emailconfig.py | 1 - synapse/config/stats.py | 2 -- synapse/storage/databases/main/events_worker.py | 2 -- synapse/util/patch_inline_callbacks.py | 2 -- 19 files changed, 2 insertions(+), 40 deletions(-) create mode 100644 changelog.d/8337.misc diff --git a/changelog.d/8337.misc b/changelog.d/8337.misc new file mode 100644 index 0000000000..4daf272204 --- /dev/null +++ b/changelog.d/8337.misc @@ -0,0 +1 @@ +Remove `__future__` imports related to Python 2 compatibility. \ No newline at end of file diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index dfc1d294dc..ab1e1f1f4c 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -15,8 +15,6 @@ # limitations under the License. """ Starts a synapse client console. """ -from __future__ import print_function - import argparse import cmd import getpass diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index cd3260b27d..345120b612 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import json import urllib from pprint import pformat diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py index de33fac1c7..fdbac087bd 100644 --- a/contrib/graph/graph.py +++ b/contrib/graph/graph.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import cgi import datetime diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py index 91db98e7ef..dd0c19368b 100644 --- a/contrib/graph/graph3.py +++ b/contrib/graph/graph3.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import argparse import cgi import datetime diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py index 69aa74bd34..b3de468687 100644 --- a/contrib/jitsimeetbridge/jitsimeetbridge.py +++ b/contrib/jitsimeetbridge/jitsimeetbridge.py @@ -10,8 +10,6 @@ the bridge. 
Requires: npm install jquery jsdom """ -from __future__ import print_function - import json import subprocess import time diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py index 372dbd9e4f..f8e0c732fb 100755 --- a/contrib/scripts/kick_users.py +++ b/contrib/scripts/kick_users.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -from __future__ import print_function import json import sys @@ -8,11 +7,6 @@ from argparse import ArgumentParser import requests -try: - raw_input -except NameError: # Python 3 - raw_input = input - def _mkurl(template, kws): for key in kws: @@ -58,7 +52,7 @@ def main(hs, room_id, access_token, user_id_prefix, why): print("The following user IDs will be kicked from %s" % room_name) for uid in kick_list: print(uid) - doit = raw_input("Continue? [Y]es\n") + doit = input("Continue? [Y]es\n") if len(doit) > 0 and doit.lower() == "y": print("Kicking members...") # encode them all diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 9eddb6d515..15e6ce6e16 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -1,7 +1,5 @@ #! /usr/bin/python -from __future__ import print_function - import argparse import ast import os diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 22b30fa78e..980b5e709f 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,7 +1,5 @@ #!/usr/bin/env python2 -from __future__ import print_function - import sys import pymacaroons diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index ad12523c4d..848a826f17 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import argparse import base64 import json diff --git a/scripts-dev/hash_history.py b/scripts-dev/hash_history.py index 89acb52e6a..8d6c3d24db 100644 --- a/scripts-dev/hash_history.py +++ b/scripts-dev/hash_history.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import sqlite3 import sys diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py index b5b63933ab..ab2e763386 100755 --- a/scripts/move_remote_media_to_new_store.py +++ b/scripts/move_remote_media_to_new_store.py @@ -32,8 +32,6 @@ To use, pipe the above into:: PYTHON_PATH=. ./scripts/move_remote_media_to_new_store.py """ -from __future__ import print_function - import argparse import logging import os diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user index b450712ab7..8b9d30877d 100755 --- a/scripts/register_new_matrix_user +++ b/scripts/register_new_matrix_user @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - from synapse._scripts.register_new_matrix_user import main if __name__ == "__main__": diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 55cce2db22..da0996edbc 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
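Both kinds of deletion in PATCH 004 are safe because the constructs are redundant once Python 2 support is dropped: the `__future__` imports are accepted but change nothing on Python 3, and the `raw_input` shim always resolves to the built-in `input`. A tiny demonstration:

```python
# Accepted on Python 3 but a no-op: print is already a function and
# / is already true division.
from __future__ import division, print_function

print(1 / 2)  # 0.5 with or without the import

# The shim deleted from kick_users.py only existed so the same name
# worked on both major versions; on Python 3 the except branch always
# runs and simply aliases the built-in.
try:
    raw_input
except NameError:  # always the case on Python 3
    raw_input = input

assert raw_input is input
```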
-from __future__ import print_function - import argparse import getpass import hashlib diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b08319ca77..dff739e106 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -15,8 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import gc import logging import math diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 72b42bfd62..cceffbfee2 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -14,7 +14,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function # This file can't be called email.py because if it is, we cannot: import email.utils diff --git a/synapse/config/stats.py b/synapse/config/stats.py index 62485189ea..b559bfa411 100644 --- a/synapse/config/stats.py +++ b/synapse/config/stats.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division - import sys from ._base import Config diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 17f5997b89..cd3739c16c 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import division - import itertools import logging import threading diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index 54c046b6e1..72574d3af2 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import functools import sys from typing import Any, Callable, List From ad055ea4cc34a415ef82a22e7b38b356ef23dbad Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 9 Sep 2020 13:25:59 +0100 Subject: [PATCH 005/134] blacklist MSC2753 sytests until it's implemented in synapse (#8285) Dendrite's implementing MSC2753 over at https://github.com/matrix-org/dendrite/pull/1370 to prove the implementation for MSC purposes, and so sytest has sprouted tests for it over at https://github.com/matrix-org/sytest/pull/944. But we don't want them to run on synapse until synapse implements it. --- changelog.d/8285.misc | 1 + sytest-blacklist | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/8285.misc diff --git a/changelog.d/8285.misc b/changelog.d/8285.misc new file mode 100644 index 0000000000..4646664ba1 --- /dev/null +++ b/changelog.d/8285.misc @@ -0,0 +1 @@ +Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. 
\ No newline at end of file diff --git a/sytest-blacklist b/sytest-blacklist index 79b2d4402a..b563448016 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -36,3 +36,11 @@ Inbound federation of state requires event_id as a mandatory paramater # Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands Can upload self-signing keys + +# Blacklisted until MSC2753 is implemented +Local users can peek into world_readable rooms by room ID +We can't peek into rooms with shared history_visibility +We can't peek into rooms with invited history_visibility +We can't peek into rooms with joined history_visibility +Local users can peek by room alias +Peeked rooms only turn up in the sync for the device who peeked them From efb6b6629c78409251f61857f2bfe6c2f8f8fb8d Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 17 Sep 2020 22:45:22 +0200 Subject: [PATCH 006/134] Move lint dependencies to extras_require (#8330) Lint dependencies can now be installed with pip install -e ".[lint]" This should help keep the version in sync between tox and documentation. --- CONTRIBUTING.md | 6 +++--- changelog.d/8330.misc | 1 + synapse/python_dependencies.py | 5 ++++- tox.ini | 10 ++-------- 4 files changed, 10 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8330.misc diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 062413e925..524f82433d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,9 +17,9 @@ https://help.github.com/articles/using-pull-requests/) to ask us to pull your changes into our repo. Some other points to follow: - + * Please base your changes on the `develop` branch. - + * Please follow the [code style requirements](#code-style). * Please include a [changelog entry](#changelog) with each PR. @@ -46,7 +46,7 @@ locally. You'll need python 3.6 or later, and to install a number of tools: ``` # Install the dependencies -pip install -U black flake8 flake8-comprehensions isort +pip install -e ".[lint]" # Run the linter script ./scripts-dev/lint.sh diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc new file mode 100644 index 0000000000..c51370f215 --- /dev/null +++ b/changelog.d/8330.misc @@ -0,0 +1 @@ +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. \ No newline at end of file diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index ff0c67228b..67f019fd22 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -104,13 +104,16 @@ CONDITIONAL_REQUIREMENTS = { # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], + # We pin black so that our tests don't start failing on new releases. + "lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] for name, optional_deps in CONDITIONAL_REQUIREMENTS.items(): # Exclude systemd as it's a system-based requirement. - if name not in ["systemd"]: + # Exclude lint as it's a dev-based requirement. + if name not in ["systemd", "lint"]: ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS diff --git a/tox.ini b/tox.ini index df473bd234..ddcab0198f 100644 --- a/tox.ini +++ b/tox.ini @@ -118,20 +118,14 @@ commands = check-manifest [testenv:check_codestyle] -skip_install = True -deps = - flake8 - flake8-comprehensions - # We pin so that our tests don't start failing on new releases of black. 
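An aside on the packaging mechanism PATCH 006 uses: the `lint` key added to `CONDITIONAL_REQUIREMENTS` feeds setuptools' `extras_require`, which is what makes `pip install -e ".[lint]"` and the `extras = lint` tox settings below resolve to the same pinned tools. A minimal sketch of the mechanism (an illustrative setup.py, not Synapse's):

```python
from setuptools import setup

setup(
    name="example-project",
    version="0.1.0",
    install_requires=["attrs"],  # always installed
    extras_require={
        # Installed only via `pip install "example-project[lint]"`,
        # or `pip install -e ".[lint]"` from a checkout.
        "lint": ["isort==5.0.3", "black==19.10b0", "flake8"],
    },
)
```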
- black==19.10b0 +extras = lint commands = python -m black --check --diff . /bin/sh -c "flake8 synapse tests scripts scripts-dev contrib synctl {env:PEP8SUFFIX:}" {toxinidir}/scripts-dev/config-lint.sh [testenv:check_isort] -skip_install = True -deps = isort==5.0.3 +extras = lint commands = /bin/sh -c "isort -c --df --sp setup.cfg synapse tests scripts-dev scripts" [testenv:check-newsfragment] From 14b5b48a2285a43f655030ec242b92d217c1994a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Sep 2020 10:49:29 +0100 Subject: [PATCH 007/134] Fix ratelimiting for federation `/send` requests. (#8342) c.f. #8295 for rationale --- changelog.d/8342.bugfix | 1 + synapse/federation/federation_server.py | 52 +++++++++++++++++++------ synapse/federation/transport/server.py | 13 ++++--- synapse/server.py | 5 +++ 4 files changed, 54 insertions(+), 17 deletions(-) create mode 100644 changelog.d/8342.bugfix diff --git a/changelog.d/8342.bugfix b/changelog.d/8342.bugfix new file mode 100644 index 0000000000..786057facb --- /dev/null +++ b/changelog.d/8342.bugfix @@ -0,0 +1 @@ +Fix ratelimitng of federation `/send` requests. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 218df884b0..ff00f0b302 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -97,10 +97,16 @@ class FederationServer(FederationBase): self.state = hs.get_state_handler() self.device_handler = hs.get_device_handler() + self._federation_ratelimiter = hs.get_federation_ratelimiter() self._server_linearizer = Linearizer("fed_server") self._transaction_linearizer = Linearizer("fed_txn_handler") + # We cache results for transaction with the same ID + self._transaction_resp_cache = ResponseCache( + hs, "fed_txn_handler", timeout_ms=30000 + ) + self.transaction_actions = TransactionActions(self.store) self.registry = hs.get_federation_registry() @@ -135,22 +141,44 @@ class FederationServer(FederationBase): request_time = self._clock.time_msec() transaction = Transaction(**transaction_data) + transaction_id = transaction.transaction_id # type: ignore - if not transaction.transaction_id: # type: ignore + if not transaction_id: raise Exception("Transaction missing transaction_id") - logger.debug("[%s] Got transaction", transaction.transaction_id) # type: ignore + logger.debug("[%s] Got transaction", transaction_id) - # use a linearizer to ensure that we don't process the same transaction - # multiple times in parallel. - with ( - await self._transaction_linearizer.queue( - (origin, transaction.transaction_id) # type: ignore - ) - ): - result = await self._handle_incoming_transaction( - origin, transaction, request_time - ) + # We wrap in a ResponseCache so that we de-duplicate retried + # transactions. + return await self._transaction_resp_cache.wrap( + (origin, transaction_id), + self._on_incoming_transaction_inner, + origin, + transaction, + request_time, + ) + + async def _on_incoming_transaction_inner( + self, origin: str, transaction: Transaction, request_time: int + ) -> Tuple[int, Dict[str, Any]]: + # Use a linearizer to ensure that transactions from a remote are + # processed in order. + with await self._transaction_linearizer.queue(origin): + # We rate limit here *after* we've queued up the incoming requests, + # so that we don't fill up the ratelimiter with blocked requests. 
+ # + # This is important as the ratelimiter allows N concurrent requests + # at a time, and only starts ratelimiting if there are more requests + # than that being processed at a time. If we queued up requests in + # the linearizer/response cache *after* the ratelimiting then those + # queued up requests would count as part of the allowed limit of N + # concurrent requests. + with self._federation_ratelimiter.ratelimit(origin) as d: + await d + + result = await self._handle_incoming_transaction( + origin, transaction, request_time + ) return result diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9325e0f857..cc7e9a973b 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -45,7 +45,6 @@ from synapse.logging.opentracing import ( ) from synapse.server import HomeServer from synapse.types import ThirdPartyInstanceID, get_domain_from_id -from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) @@ -72,9 +71,7 @@ class TransportLayerServer(JsonResource): super(TransportLayerServer, self).__init__(hs, canonical_json=False) self.authenticator = Authenticator(hs) - self.ratelimiter = FederationRateLimiter( - self.clock, config=hs.config.rc_federation - ) + self.ratelimiter = hs.get_federation_ratelimiter() self.register_servlets() @@ -272,6 +269,8 @@ class BaseFederationServlet: PREFIX = FEDERATION_V1_PREFIX # Allows specifying the API version + RATELIMIT = True # Whether to rate limit requests or not + def __init__(self, handler, authenticator, ratelimiter, server_name): self.handler = handler self.authenticator = authenticator @@ -335,7 +334,7 @@ class BaseFederationServlet: ) with scope: - if origin: + if origin and self.RATELIMIT: with ratelimiter.ratelimit(origin) as d: await d if request._disconnected: @@ -372,6 +371,10 @@ class BaseFederationServlet: class FederationSendServlet(BaseFederationServlet): PATH = "/send/(?P[^/]*)/?" + # We ratelimit manually in the handler as we queue up the requests and we + # don't want to fill up the ratelimiter with blocked requests. 
+ RATELIMIT = False + def __init__(self, handler, server_name, **kwargs): super(FederationSendServlet, self).__init__( handler, server_name=server_name, **kwargs diff --git a/synapse/server.py b/synapse/server.py index 9055b97ac3..5e3752c333 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -114,6 +114,7 @@ from synapse.streams.events import EventSources from synapse.types import DomainSpecificString from synapse.util import Clock from synapse.util.distributor import Distributor +from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) @@ -642,6 +643,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_replication_streams(self) -> Dict[str, Stream]: return {stream.NAME: stream(self) for stream in STREAMS_MAP.values()} + @cache_in_self + def get_federation_ratelimiter(self) -> FederationRateLimiter: + return FederationRateLimiter(self.clock, config=self.config.rc_federation) + async def remove_pusher(self, app_id: str, push_key: str, user_id: str): return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id) From 9db4c1b1758c0d6092161c67f9e3a3cf4ff05bdb Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 07:56:20 -0400 Subject: [PATCH 008/134] Add flags to /versions about whether new rooms are encrypted by default. (#8343) --- changelog.d/8343.feature | 1 + synapse/rest/client/versions.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 changelog.d/8343.feature diff --git a/changelog.d/8343.feature b/changelog.d/8343.feature new file mode 100644 index 0000000000..ccecb22f37 --- /dev/null +++ b/changelog.d/8343.feature @@ -0,0 +1 @@ +Add flags to the `/versions` endpoint that includes whether new rooms default to using E2EE. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 24ac57f35d..c560edbc59 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -19,6 +19,7 @@ import logging import re +from synapse.api.constants import RoomCreationPreset from synapse.http.servlet import RestServlet logger = logging.getLogger(__name__) @@ -31,6 +32,20 @@ class VersionsRestServlet(RestServlet): super(VersionsRestServlet, self).__init__() self.config = hs.config + # Calculate these once since they shouldn't change after start-up. + self.e2ee_forced_public = ( + RoomCreationPreset.PUBLIC_CHAT + in self.config.encryption_enabled_by_default_for_room_presets + ) + self.e2ee_forced_private = ( + RoomCreationPreset.PRIVATE_CHAT + in self.config.encryption_enabled_by_default_for_room_presets + ) + self.e2ee_forced_trusted_private = ( + RoomCreationPreset.TRUSTED_PRIVATE_CHAT + in self.config.encryption_enabled_by_default_for_room_presets + ) + def on_GET(self, request): return ( 200, @@ -62,6 +77,10 @@ class VersionsRestServlet(RestServlet): "org.matrix.msc2432": True, # Implements additional endpoints as described in MSC2666 "uk.half-shot.msc2666": True, + # Whether new rooms will be set to encrypted or not (based on presets). 
+ "io.element.e2ee_forced.public": self.e2ee_forced_public, + "io.element.e2ee_forced.private": self.e2ee_forced_private, + "io.element.e2ee_forced.trusted_private": self.e2ee_forced_trusted_private, }, }, ) From 7c407efdc80abf2a991844d107a896d629e3965a Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Sep 2020 13:56:40 +0200 Subject: [PATCH 009/134] Update test logging to be able to accept braces (#8335) --- changelog.d/8335.misc | 1 + tests/test_utils/logging_setup.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8335.misc diff --git a/changelog.d/8335.misc b/changelog.d/8335.misc new file mode 100644 index 0000000000..7e0a4c7d83 --- /dev/null +++ b/changelog.d/8335.misc @@ -0,0 +1 @@ +Fix test logging to allow braces in log output. \ No newline at end of file diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 2d96b0fa8d..fdfb840b62 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -29,8 +29,7 @@ class ToTwistedHandler(logging.Handler): log_entry = self.format(record) log_level = record.levelname.lower().replace("warning", "warn") self.tx_log.emit( - twisted.logger.LogLevel.levelWithName(log_level), - log_entry.replace("{", r"(").replace("}", r")"), + twisted.logger.LogLevel.levelWithName(log_level), "{entry}", entry=log_entry ) From 43f2b67e4d2ce95b3b13d88e755afc7e3907e82b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 18 Sep 2020 14:25:52 +0100 Subject: [PATCH 010/134] Intelligently select extremities used in backfill. (#8349) Instead of just using the most recent extremities let's pick the ones that will give us results that the pagination request cares about, i.e. pick extremities only if they have a smaller depth than the pagination token. This is useful when we fail to backfill an extremity, as we no longer get stuck requesting that same extremity repeatedly. --- changelog.d/8349.bugfix | 1 + synapse/handlers/federation.py | 65 +++++++++++++++++++++--- synapse/handlers/pagination.py | 8 +-- synapse/storage/databases/main/stream.py | 13 ++--- 4 files changed, 67 insertions(+), 20 deletions(-) create mode 100644 changelog.d/8349.bugfix diff --git a/changelog.d/8349.bugfix b/changelog.d/8349.bugfix new file mode 100644 index 0000000000..cf2f531b14 --- /dev/null +++ b/changelog.d/8349.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 43f2986f89..014dab2940 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -943,15 +943,26 @@ class FederationHandler(BaseHandler): return events - async def maybe_backfill(self, room_id, current_depth): + async def maybe_backfill( + self, room_id: str, current_depth: int, limit: int + ) -> bool: """Checks the database to see if we should backfill before paginating, and if so do. + + Args: + room_id + current_depth: The depth from which we're paginating from. This is + used to decide if we should backfill and what extremities to + use. + limit: The number of events that the pagination request will + return. This is used as part of the heuristic to decide if we + should back paginate. 
""" extremities = await self.store.get_oldest_events_with_depth_in_room(room_id) if not extremities: logger.debug("Not backfilling as no extremeties found.") - return + return False # We only want to paginate if we can actually see the events we'll get, # as otherwise we'll just spend a lot of resources to get redacted @@ -1004,16 +1015,54 @@ class FederationHandler(BaseHandler): sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) max_depth = sorted_extremeties_tuple[0][1] + # If we're approaching an extremity we trigger a backfill, otherwise we + # no-op. + # + # We chose twice the limit here as then clients paginating backwards + # will send pagination requests that trigger backfill at least twice + # using the most recent extremity before it gets removed (see below). We + # chose more than one times the limit in case of failure, but choosing a + # much larger factor will result in triggering a backfill request much + # earlier than necessary. + if current_depth - 2 * limit > max_depth: + logger.debug( + "Not backfilling as we don't need to. %d < %d - 2 * %d", + max_depth, + current_depth, + limit, + ) + return False + + logger.debug( + "room_id: %s, backfill: current_depth: %s, max_depth: %s, extrems: %s", + room_id, + current_depth, + max_depth, + sorted_extremeties_tuple, + ) + + # We ignore extremities that have a greater depth than our current depth + # as: + # 1. we don't really care about getting events that have happened + # before our current position; and + # 2. we have likely previously tried and failed to backfill from that + # extremity, so to avoid getting "stuck" requesting the same + # backfill repeatedly we drop those extremities. + filtered_sorted_extremeties_tuple = [ + t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth + ] + + # However, we need to check that the filtered extremities are non-empty. + # If they are empty then either we can a) bail or b) still attempt to + # backill. We opt to try backfilling anyway just in case we do get + # relevant events. + if filtered_sorted_extremeties_tuple: + sorted_extremeties_tuple = filtered_sorted_extremeties_tuple + # We don't want to specify too many extremities as it causes the backfill # request URI to be too long. extremities = dict(sorted_extremeties_tuple[:5]) - if current_depth > max_depth: - logger.debug( - "Not backfilling as we don't need to. %d < %d", max_depth, current_depth - ) - return - # Now we need to decide which hosts to hit first. # First we try hosts that are already in the room diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 34ed0e2921..6067585f9b 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -362,9 +362,9 @@ class PaginationHandler: # if we're going backwards, we might need to backfill. This # requires that we have a topo token. 
if room_token.topological: - max_topo = room_token.topological + curr_topo = room_token.topological else: - max_topo = await self.store.get_max_topological_token( + curr_topo = await self.store.get_current_topological_token( room_id, room_token.stream ) @@ -380,11 +380,11 @@ class PaginationHandler: leave_token = await self.store.get_topological_token_for_event( member_event_id ) - if RoomStreamToken.parse(leave_token).topological < max_topo: + if RoomStreamToken.parse(leave_token).topological < curr_topo: source_config.from_key = str(leave_token) await self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, max_topo + room_id, curr_topo, limit=source_config.limit, ) events, next_key = await self.store.paginate_room_events( diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index be6df8a6d1..db20a3db30 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -648,23 +648,20 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"]) - async def get_max_topological_token(self, room_id: str, stream_key: int) -> int: - """Get the max topological token in a room before the given stream + async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: + """Gets the topological token in a room after or at the given stream ordering. Args: room_id stream_key - - Returns: - The maximum topological token. """ sql = ( - "SELECT coalesce(max(topological_ordering), 0) FROM events" - " WHERE room_id = ? AND stream_ordering < ?" + "SELECT coalesce(MIN(topological_ordering), 0) FROM events" + " WHERE room_id = ? AND stream_ordering >= ?" ) row = await self.db_pool.execute( - "get_max_topological_token", None, sql, room_id, stream_key + "get_current_topological_token", None, sql, room_id, stream_key ) return row[0][0] if row else 0 From 27c1abc7b876ef3e73c8b418a89b987f86147829 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 18 Sep 2020 14:51:11 +0100 Subject: [PATCH 011/134] Use _check_sigs_and_hash_and_fetch to validate backfill requests (#8350) This is a bit of a hack, as `_check_sigs_and_hash_and_fetch` is intended for attempting to pull an event from the database/(re)pull it from the server that originally sent the event if checking the signature of the event fails. During backfill we *know* that we won't have the event in our database, however it is still useful to be able to query the original sending server as the server we're backfilling from may be acting maliciously. The main benefit and reason for this change however is that `_check_sigs_and_hash_and_fetch` will drop an event during backfill if it cannot be successfully validated, whereas the current code will simply fail the backfill request - resulting in the client's /messages request silently being dropped. This is a quick patch to fix backfilling rooms that contain malformed events. A better implementation in planned in future. 
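The behavioural difference this commit message describes: previously a single unverifiable PDU failed the whole backfill response, whereas afterwards each PDU is checked individually and the bad ones are dropped. A schematic sketch of that change (a hypothetical check function, not the real signature-verification code):

```python
import asyncio


async def check_pdu(pdu):
    # Hypothetical stand-in for the signature/hash verification.
    if pdu.get("bad"):
        raise ValueError("signature check failed")
    return pdu


async def backfill_before(pdus):
    # All-or-nothing: one malformed event fails the whole response, so
    # the client's /messages request keeps getting stuck on it.
    return [await check_pdu(p) for p in pdus]


async def backfill_after(pdus):
    # Per-event: malformed events are dropped, the rest still paginate.
    checked = []
    for pdu in pdus:
        try:
            checked.append(await check_pdu(pdu))
        except ValueError:
            continue
    return checked


pdus = [{"event_id": "$ok"}, {"event_id": "$bad", "bad": True}]
print(asyncio.run(backfill_after(pdus)))  # [{'event_id': '$ok'}]
```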
--- changelog.d/8350.bugfix | 1 + synapse/federation/federation_client.py | 8 +++----- 2 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8350.bugfix diff --git a/changelog.d/8350.bugfix b/changelog.d/8350.bugfix new file mode 100644 index 0000000000..0e493c0282 --- /dev/null +++ b/changelog.d/8350.bugfix @@ -0,0 +1 @@ +Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. \ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a2e8d96ea2..d42930d1b9 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -217,11 +217,9 @@ class FederationClient(FederationBase): for p in transaction_data["pdus"] ] - # FIXME: We should handle signature failures more gracefully. - pdus[:] = await make_deferred_yieldable( - defer.gatherResults( - self._check_sigs_and_hashes(room_version, pdus), consumeErrors=True, - ).addErrback(unwrapFirstError) + # Check signatures and hash of pdus, removing any from the list that fail checks + pdus[:] = await self._check_sigs_and_hash_and_fetch( + dest, pdus, outlier=True, room_version=room_version ) return pdus From 68c7a6936f8921744d083e6dc8a2a085cce30b2a Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Fri, 18 Sep 2020 14:55:13 +0100 Subject: [PATCH 012/134] Allow appservice users to /login (#8320) Add ability for ASes to /login using the `uk.half-shot.msc2778.login.application_service` login `type`. Co-authored-by: Patrick Cloke --- changelog.d/8320.feature | 1 + synapse/rest/client/v1/login.py | 49 ++++++++--- tests/rest/client/v1/test_login.py | 134 ++++++++++++++++++++++++++++- 3 files changed, 173 insertions(+), 11 deletions(-) create mode 100644 changelog.d/8320.feature diff --git a/changelog.d/8320.feature b/changelog.d/8320.feature new file mode 100644 index 0000000000..475a5fe62d --- /dev/null +++ b/changelog.d/8320.feature @@ -0,0 +1 @@ +Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. 
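Concretely, the new flow lets an appservice exchange its `as_token` for a user access token without a password. A hedged example of the request this enables (the endpoint and login type come from the patch; the homeserver URL, token placeholder and user are invented):

```python
import requests

resp = requests.post(
    "https://homeserver.example.com/_matrix/client/r0/login",
    headers={"Authorization": "Bearer <appservice as_token>"},
    json={
        "type": "uk.half-shot.msc2778.login.application_service",
        "identifier": {"type": "m.id.user", "user": "as_user_alice"},
    },
)
resp.raise_for_status()
# An access token for the appservice-namespaced user, no password needed.
print(resp.json()["access_token"])
```

The user must fall within the appservice's registered namespace (or be the appservice sender itself), otherwise the handler returns 403, as the tests below exercise.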
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index a14618ac84..dd8cdc0d9f 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -18,6 +18,7 @@ from typing import Awaitable, Callable, Dict, Optional from synapse.api.errors import Codes, LoginError, SynapseError from synapse.api.ratelimiting import Ratelimiter +from synapse.appservice import ApplicationService from synapse.handlers.auth import ( convert_client_dict_legacy_fields_to_identifier, login_id_phone_to_thirdparty, @@ -44,6 +45,7 @@ class LoginRestServlet(RestServlet): TOKEN_TYPE = "m.login.token" JWT_TYPE = "org.matrix.login.jwt" JWT_TYPE_DEPRECATED = "m.login.jwt" + APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service" def __init__(self, hs): super(LoginRestServlet, self).__init__() @@ -61,6 +63,8 @@ class LoginRestServlet(RestServlet): self.cas_enabled = hs.config.cas_enabled self.oidc_enabled = hs.config.oidc_enabled + self.auth = hs.get_auth() + self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self.handlers = hs.get_handlers() @@ -107,6 +111,8 @@ class LoginRestServlet(RestServlet): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) + flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) + return 200, {"flows": flows} def on_OPTIONS(self, request: SynapseRequest): @@ -116,8 +122,12 @@ class LoginRestServlet(RestServlet): self._address_ratelimiter.ratelimit(request.getClientIP()) login_submission = parse_json_object_from_request(request) + try: - if self.jwt_enabled and ( + if login_submission["type"] == LoginRestServlet.APPSERVICE_TYPE: + appservice = self.auth.get_appservice_by_req(request) + result = await self._do_appservice_login(login_submission, appservice) + elif self.jwt_enabled and ( login_submission["type"] == LoginRestServlet.JWT_TYPE or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED ): @@ -134,6 +144,33 @@ class LoginRestServlet(RestServlet): result["well_known"] = well_known_data return 200, result + def _get_qualified_user_id(self, identifier): + if identifier["type"] != "m.id.user": + raise SynapseError(400, "Unknown login identifier type") + if "user" not in identifier: + raise SynapseError(400, "User identifier is missing 'user' key") + + if identifier["user"].startswith("@"): + return identifier["user"] + else: + return UserID(identifier["user"], self.hs.hostname).to_string() + + async def _do_appservice_login( + self, login_submission: JsonDict, appservice: ApplicationService + ): + logger.info( + "Got appservice login request with identifier: %r", + login_submission.get("identifier"), + ) + + identifier = convert_client_dict_legacy_fields_to_identifier(login_submission) + qualified_user_id = self._get_qualified_user_id(identifier) + + if not appservice.is_interested_in_user(qualified_user_id): + raise LoginError(403, "Invalid access_token", errcode=Codes.FORBIDDEN) + + return await self._complete_login(qualified_user_id, login_submission) + async def _do_other_login(self, login_submission: JsonDict) -> Dict[str, str]: """Handle non-token/saml/jwt logins @@ -219,15 +256,7 @@ class LoginRestServlet(RestServlet): # by this point, the identifier should be an m.id.user: if it's anything # else, we haven't understood it. 
- if identifier["type"] != "m.id.user": - raise SynapseError(400, "Unknown login identifier type") - if "user" not in identifier: - raise SynapseError(400, "User identifier is missing 'user' key") - - if identifier["user"].startswith("@"): - qualified_user_id = identifier["user"] - else: - qualified_user_id = UserID(identifier["user"], self.hs.hostname).to_string() + qualified_user_id = self._get_qualified_user_id(identifier) # Check if we've hit the failed ratelimit (but don't update it) self._failed_attempts_ratelimiter.ratelimit( diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 2668662c9e..5d987a30c7 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -7,8 +7,9 @@ from mock import Mock import jwt import synapse.rest.admin +from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login, logout -from synapse.rest.client.v2_alpha import devices +from synapse.rest.client.v2_alpha import devices, register from synapse.rest.client.v2_alpha.account import WhoamiRestServlet from tests import unittest @@ -748,3 +749,134 @@ class JWTPubKeyTestCase(unittest.HomeserverTestCase): channel.json_body["error"], "JWT validation failed: Signature verification failed", ) + + +AS_USER = "as_user_alice" + + +class AppserviceLoginRestServletTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + register.register_servlets, + ] + + def register_as_user(self, username): + request, channel = self.make_request( + b"POST", + "/_matrix/client/r0/register?access_token=%s" % (self.service.token,), + {"username": username}, + ) + self.render(request) + + def make_homeserver(self, reactor, clock): + self.hs = self.setup_test_homeserver() + + self.service = ApplicationService( + id="unique_identifier", + token="some_token", + hostname="example.com", + sender="@asbot:example.com", + namespaces={ + ApplicationService.NS_USERS: [ + {"regex": r"@as_user.*", "exclusive": False} + ], + ApplicationService.NS_ROOMS: [], + ApplicationService.NS_ALIASES: [], + }, + ) + self.another_service = ApplicationService( + id="another__identifier", + token="another_token", + hostname="example.com", + sender="@as2bot:example.com", + namespaces={ + ApplicationService.NS_USERS: [ + {"regex": r"@as2_user.*", "exclusive": False} + ], + ApplicationService.NS_ROOMS: [], + ApplicationService.NS_ALIASES: [], + }, + ) + + self.hs.get_datastore().services_cache.append(self.service) + self.hs.get_datastore().services_cache.append(self.another_service) + return self.hs + + def test_login_appservice_user(self): + """Test that an appservice user can use /login + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def test_login_appservice_user_bot(self): + """Test that the appservice bot can use /login + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": self.service.sender}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def 
test_login_appservice_wrong_user(self): + """Test that non-as users cannot login with the as token + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": "fibble_wibble"}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_login_appservice_wrong_as(self): + """Test that as users cannot login with wrong as token + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request( + b"POST", LOGIN_URL, params, access_token=self.another_service.token + ) + + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_login_appservice_no_token(self): + """Test that users must provide a token when using the appservice + login method + """ + self.register_as_user(AS_USER) + + params = { + "type": login.LoginRestServlet.APPSERVICE_TYPE, + "identifier": {"type": "m.id.user", "user": AS_USER}, + } + request, channel = self.make_request(b"POST", LOGIN_URL, params) + + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) From 8a4a4186ded34bab1ffb4ee1cebcb476890da207 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 09:56:44 -0400 Subject: [PATCH 013/134] Simplify super() calls to Python 3 syntax. (#8344) This converts calls like super(Foo, self) -> super(). Generated with: sed -i "" -Ee 's/super\([^\(]+\)/super()/g' **/*.py --- changelog.d/8344.misc | 1 + scripts-dev/definitions.py | 2 +- scripts-dev/federation_client.py | 2 +- synapse/api/errors.py | 50 +++++++++---------- synapse/api/filtering.py | 2 +- synapse/app/generic_worker.py | 6 +-- synapse/appservice/api.py | 2 +- synapse/config/consent_config.py | 2 +- synapse/config/registration.py | 2 +- synapse/config/server_notices_config.py | 2 +- synapse/crypto/keyring.py | 4 +- synapse/federation/federation_client.py | 2 +- synapse/federation/federation_server.py | 2 +- synapse/federation/transport/server.py | 10 ++-- synapse/groups/groups_server.py | 2 +- synapse/handlers/admin.py | 2 +- synapse/handlers/auth.py | 2 +- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 4 +- synapse/handlers/directory.py | 2 +- synapse/handlers/events.py | 4 +- synapse/handlers/federation.py | 2 +- synapse/handlers/groups_local.py | 2 +- synapse/handlers/identity.py | 2 +- synapse/handlers/initial_sync.py | 2 +- synapse/handlers/profile.py | 4 +- synapse/handlers/read_marker.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/register.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_list.py | 2 +- synapse/handlers/room_member_worker.py | 2 +- synapse/handlers/search.py | 2 +- synapse/handlers/set_password.py | 2 +- synapse/handlers/user_directory.py | 2 +- synapse/http/__init__.py | 2 +- synapse/logging/formatter.py | 2 +- synapse/logging/scopecontextmanager.py | 6 +-- synapse/push/__init__.py | 2 +- synapse/replication/http/devices.py | 2 +- synapse/replication/http/federation.py | 8 +-- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 6 +-- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 2 +- synapse/replication/slave/storage/_base.py | 2 +- 
.../replication/slave/storage/account_data.py | 2 +- .../replication/slave/storage/client_ips.py | 2 +- .../replication/slave/storage/deviceinbox.py | 2 +- synapse/replication/slave/storage/devices.py | 2 +- synapse/replication/slave/storage/events.py | 2 +- .../replication/slave/storage/filtering.py | 2 +- synapse/replication/slave/storage/groups.py | 2 +- synapse/replication/slave/storage/presence.py | 2 +- synapse/replication/slave/storage/pushers.py | 2 +- synapse/replication/slave/storage/receipts.py | 2 +- synapse/replication/slave/storage/room.py | 2 +- synapse/replication/tcp/streams/_base.py | 2 +- synapse/rest/admin/devices.py | 2 +- synapse/rest/client/v1/directory.py | 6 +-- synapse/rest/client/v1/events.py | 4 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 4 +- synapse/rest/client/v1/logout.py | 4 +- synapse/rest/client/v1/presence.py | 2 +- synapse/rest/client/v1/profile.py | 6 +-- synapse/rest/client/v1/push_rule.py | 2 +- synapse/rest/client/v1/pusher.py | 6 +-- synapse/rest/client/v1/room.py | 38 +++++++------- synapse/rest/client/v1/voip.py | 2 +- synapse/rest/client/v2_alpha/account.py | 22 ++++---- synapse/rest/client/v2_alpha/account_data.py | 4 +- .../rest/client/v2_alpha/account_validity.py | 4 +- synapse/rest/client/v2_alpha/auth.py | 2 +- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 6 +-- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 48 +++++++++--------- synapse/rest/client/v2_alpha/keys.py | 12 ++--- synapse/rest/client/v2_alpha/notifications.py | 2 +- synapse/rest/client/v2_alpha/openid.py | 2 +- .../rest/client/v2_alpha/password_policy.py | 2 +- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 10 ++-- synapse/rest/client/v2_alpha/relations.py | 8 +-- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 6 +-- .../v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- synapse/rest/client/v2_alpha/shared_rooms.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 2 +- synapse/rest/client/v2_alpha/tags.py | 4 +- synapse/rest/client/v2_alpha/thirdparty.py | 8 +-- synapse/rest/client/v2_alpha/tokenrefresh.py | 2 +- .../rest/client/v2_alpha/user_directory.py | 2 +- synapse/rest/client/versions.py | 2 +- synapse/storage/databases/main/__init__.py | 2 +- .../storage/databases/main/account_data.py | 4 +- synapse/storage/databases/main/appservice.py | 2 +- synapse/storage/databases/main/client_ips.py | 4 +- synapse/storage/databases/main/deviceinbox.py | 4 +- synapse/storage/databases/main/devices.py | 4 +- .../databases/main/event_federation.py | 2 +- .../databases/main/event_push_actions.py | 4 +- .../databases/main/events_bg_updates.py | 2 +- .../storage/databases/main/events_worker.py | 2 +- .../databases/main/media_repository.py | 6 +-- .../databases/main/monthly_active_users.py | 4 +- synapse/storage/databases/main/push_rule.py | 2 +- synapse/storage/databases/main/receipts.py | 4 +- .../storage/databases/main/registration.py | 6 +-- synapse/storage/databases/main/room.py | 6 +-- synapse/storage/databases/main/roommember.py | 6 +-- synapse/storage/databases/main/search.py | 4 +- synapse/storage/databases/main/state.py | 6 +-- synapse/storage/databases/main/stats.py | 2 +- synapse/storage/databases/main/stream.py | 2 +- .../storage/databases/main/transactions.py | 2 +- 
.../storage/databases/main/user_directory.py | 4 +- synapse/storage/databases/state/bg_updates.py | 2 +- synapse/storage/databases/state/store.py | 2 +- synapse/util/manhole.py | 2 +- synapse/util/retryutils.py | 2 +- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 2 +- .../replication/slave/storage/test_events.py | 2 +- tests/rest/test_well_known.py | 2 +- tests/server.py | 2 +- tests/storage/test_appservice.py | 2 +- tests/storage/test_devices.py | 2 +- tests/test_state.py | 2 +- tests/unittest.py | 2 +- 133 files changed, 272 insertions(+), 281 deletions(-) create mode 100644 changelog.d/8344.misc diff --git a/changelog.d/8344.misc b/changelog.d/8344.misc new file mode 100644 index 0000000000..0b342d5137 --- /dev/null +++ b/changelog.d/8344.misc @@ -0,0 +1 @@ +Simplify `super()` calls to Python 3 syntax. diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 15e6ce6e16..313860df13 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -11,7 +11,7 @@ import yaml class DefinitionVisitor(ast.NodeVisitor): def __init__(self): - super(DefinitionVisitor, self).__init__() + super().__init__() self.functions = {} self.classes = {} self.names = {} diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index 848a826f17..abcec48c4f 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -321,7 +321,7 @@ class MatrixConnectionAdapter(HTTPAdapter): url = urlparse.urlunparse( ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) ) - return super(MatrixConnectionAdapter, self).get_connection(url, proxies) + return super().get_connection(url, proxies) if __name__ == "__main__": diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 94a9e58eae..cd6670d0a2 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -87,7 +87,7 @@ class CodeMessageException(RuntimeError): """ def __init__(self, code: Union[int, HTTPStatus], msg: str): - super(CodeMessageException, self).__init__("%d: %s" % (code, msg)) + super().__init__("%d: %s" % (code, msg)) # Some calls to this method pass instances of http.HTTPStatus for `code`. # While HTTPStatus is a subclass of int, it has magic __str__ methods @@ -138,7 +138,7 @@ class SynapseError(CodeMessageException): msg: The human-readable error message. 
errcode: The matrix error code e.g 'M_FORBIDDEN' """ - super(SynapseError, self).__init__(code, msg) + super().__init__(code, msg) self.errcode = errcode def error_dict(self): @@ -159,7 +159,7 @@ class ProxiedRequestError(SynapseError): errcode: str = Codes.UNKNOWN, additional_fields: Optional[Dict] = None, ): - super(ProxiedRequestError, self).__init__(code, msg, errcode) + super().__init__(code, msg, errcode) if additional_fields is None: self._additional_fields = {} # type: Dict else: @@ -181,7 +181,7 @@ class ConsentNotGivenError(SynapseError): msg: The human-readable error message consent_url: The URL where the user can give their consent """ - super(ConsentNotGivenError, self).__init__( + super().__init__( code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.CONSENT_NOT_GIVEN ) self._consent_uri = consent_uri @@ -201,7 +201,7 @@ class UserDeactivatedError(SynapseError): Args: msg: The human-readable error message """ - super(UserDeactivatedError, self).__init__( + super().__init__( code=HTTPStatus.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED ) @@ -225,7 +225,7 @@ class FederationDeniedError(SynapseError): self.destination = destination - super(FederationDeniedError, self).__init__( + super().__init__( code=403, msg="Federation denied with %s." % (self.destination,), errcode=Codes.FORBIDDEN, @@ -244,9 +244,7 @@ class InteractiveAuthIncompleteError(Exception): """ def __init__(self, session_id: str, result: "JsonDict"): - super(InteractiveAuthIncompleteError, self).__init__( - "Interactive auth not yet complete" - ) + super().__init__("Interactive auth not yet complete") self.session_id = session_id self.result = result @@ -261,14 +259,14 @@ class UnrecognizedRequestError(SynapseError): message = "Unrecognized request" else: message = args[0] - super(UnrecognizedRequestError, self).__init__(400, message, **kwargs) + super().__init__(400, message, **kwargs) class NotFoundError(SynapseError): """An error indicating we can't find the thing you asked for""" def __init__(self, msg: str = "Not found", errcode: str = Codes.NOT_FOUND): - super(NotFoundError, self).__init__(404, msg, errcode=errcode) + super().__init__(404, msg, errcode=errcode) class AuthError(SynapseError): @@ -279,7 +277,7 @@ class AuthError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.FORBIDDEN - super(AuthError, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class InvalidClientCredentialsError(SynapseError): @@ -335,7 +333,7 @@ class ResourceLimitError(SynapseError): ): self.admin_contact = admin_contact self.limit_type = limit_type - super(ResourceLimitError, self).__init__(code, msg, errcode=errcode) + super().__init__(code, msg, errcode=errcode) def error_dict(self): return cs_error( @@ -352,7 +350,7 @@ class EventSizeError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.TOO_LARGE - super(EventSizeError, self).__init__(413, *args, **kwargs) + super().__init__(413, *args, **kwargs) class EventStreamError(SynapseError): @@ -361,7 +359,7 @@ class EventStreamError(SynapseError): def __init__(self, *args, **kwargs): if "errcode" not in kwargs: kwargs["errcode"] = Codes.BAD_PAGINATION - super(EventStreamError, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) class LoginError(SynapseError): @@ -384,7 +382,7 @@ class InvalidCaptchaError(SynapseError): error_url: Optional[str] = None, errcode: str = Codes.CAPTCHA_INVALID, ): - super(InvalidCaptchaError, 
self).__init__(code, msg, errcode)
+        super().__init__(code, msg, errcode)
         self.error_url = error_url

     def error_dict(self):
@@ -402,7 +400,7 @@ class LimitExceededError(SynapseError):
         retry_after_ms: Optional[int] = None,
         errcode: str = Codes.LIMIT_EXCEEDED,
     ):
-        super(LimitExceededError, self).__init__(code, msg, errcode)
+        super().__init__(code, msg, errcode)
         self.retry_after_ms = retry_after_ms

     def error_dict(self):
@@ -418,9 +416,7 @@ class RoomKeysVersionError(SynapseError):
         Args:
             current_version: the current version of the store they should have used
         """
-        super(RoomKeysVersionError, self).__init__(
-            403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION
-        )
+        super().__init__(403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION)
         self.current_version = current_version

@@ -429,7 +425,7 @@ class UnsupportedRoomVersionError(SynapseError):
     not support."""

     def __init__(self, msg: str = "Homeserver does not support this room version"):
-        super(UnsupportedRoomVersionError, self).__init__(
+        super().__init__(
             code=400, msg=msg, errcode=Codes.UNSUPPORTED_ROOM_VERSION,
         )

@@ -440,7 +436,7 @@ class ThreepidValidationError(SynapseError):
     def __init__(self, *args, **kwargs):
         if "errcode" not in kwargs:
             kwargs["errcode"] = Codes.FORBIDDEN
-        super(ThreepidValidationError, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)


 class IncompatibleRoomVersionError(SynapseError):
@@ -451,7 +447,7 @@ class IncompatibleRoomVersionError(SynapseError):
     """

     def __init__(self, room_version: str):
-        super(IncompatibleRoomVersionError, self).__init__(
+        super().__init__(
             code=400,
             msg="Your homeserver does not support the features required to "
             "join this room",
@@ -473,7 +469,7 @@ class PasswordRefusedError(SynapseError):
         msg: str = "This password doesn't comply with the server's policy",
         errcode: str = Codes.WEAK_PASSWORD,
     ):
-        super(PasswordRefusedError, self).__init__(
+        super().__init__(
             code=400, msg=msg, errcode=errcode,
         )

@@ -488,7 +484,7 @@ class RequestSendFailed(RuntimeError):
     """

     def __init__(self, inner_exception, can_retry):
-        super(RequestSendFailed, self).__init__(
+        super().__init__(
             "Failed to send request: %s: %s"
             % (type(inner_exception).__name__, inner_exception)
         )
@@ -542,7 +538,7 @@ class FederationError(RuntimeError):
         self.source = source

         msg = "%s %s: %s" % (level, code, reason)
-        super(FederationError, self).__init__(msg)
+        super().__init__(msg)

     def get_dict(self):
         return {
@@ -570,7 +566,7 @@ class HttpResponseException(CodeMessageException):
             msg: reason phrase from HTTP response status line
             response: body of response
         """
-        super(HttpResponseException, self).__init__(code, msg)
+        super().__init__(code, msg)
         self.response = response

     def to_synapse_error(self):
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index bb33345be6..5caf336fd0 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -132,7 +132,7 @@ def matrix_user_id_validator(user_id_str):

 class Filtering:
     def __init__(self, hs):
-        super(Filtering, self).__init__()
+        super().__init__()
         self.store = hs.get_datastore()

     async def get_user_filter(self, user_localpart, filter_id):
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index f985810e88..c38413c893 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -152,7 +152,7 @@ class PresenceStatusStubServlet(RestServlet):
     PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status")

     def __init__(self, hs):
-        super(PresenceStatusStubServlet, self).__init__()
+
super().__init__() self.auth = hs.get_auth() async def on_GET(self, request, user_id): @@ -176,7 +176,7 @@ class KeyUploadServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(KeyUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.http_client = hs.get_simple_http_client() @@ -646,7 +646,7 @@ class GenericWorkerServer(HomeServer): class GenericWorkerReplicationHandler(ReplicationDataHandler): def __init__(self, hs): - super(GenericWorkerReplicationHandler, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index bb6fa8299a..1514c0f691 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -88,7 +88,7 @@ class ApplicationServiceApi(SimpleHttpClient): """ def __init__(self, hs): - super(ApplicationServiceApi, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.protocol_meta_cache = ResponseCache( diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index aec9c4bbce..fbddebeeab 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -77,7 +77,7 @@ class ConsentConfig(Config): section = "consent" def __init__(self, *args): - super(ConsentConfig, self).__init__(*args) + super().__init__(*args) self.user_consent_version = None self.user_consent_template_dir = None diff --git a/synapse/config/registration.py b/synapse/config/registration.py index a185655774..5ffbb934fe 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -30,7 +30,7 @@ class AccountValidityConfig(Config): def __init__(self, config, synapse_config): if config is None: return - super(AccountValidityConfig, self).__init__() + super().__init__() self.enabled = config.get("enabled", False) self.renew_by_email_enabled = "renew_at" in config diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py index 6c427b6f92..57f69dc8e2 100644 --- a/synapse/config/server_notices_config.py +++ b/synapse/config/server_notices_config.py @@ -62,7 +62,7 @@ class ServerNoticesConfig(Config): section = "servernotices" def __init__(self, *args): - super(ServerNoticesConfig, self).__init__(*args) + super().__init__(*args) self.server_notices_mxid = None self.server_notices_mxid_display_name = None self.server_notices_mxid_avatar_url = None diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 32c31b1cd1..42e4087a92 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -558,7 +558,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the "perspectives" servers""" def __init__(self, hs): - super(PerspectivesKeyFetcher, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() self.key_servers = self.config.key_servers @@ -728,7 +728,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" def __init__(self, hs): - super(ServerKeyFetcher, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a2e8d96ea2..639d19f696 100644 --- a/synapse/federation/federation_client.py +++ 
b/synapse/federation/federation_client.py @@ -79,7 +79,7 @@ class InvalidResponseError(RuntimeError): class FederationClient(FederationBase): def __init__(self, hs): - super(FederationClient, self).__init__(hs) + super().__init__(hs) self.pdu_destination_tried = {} self._clock.looping_call(self._clear_tried_cache, 60 * 1000) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index ff00f0b302..2dcd081cbc 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -90,7 +90,7 @@ pdu_process_time = Histogram( class FederationServer(FederationBase): def __init__(self, hs): - super(FederationServer, self).__init__(hs) + super().__init__(hs) self.auth = hs.get_auth() self.handler = hs.get_handlers().federation_handler diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index cc7e9a973b..3a6b95631e 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -68,7 +68,7 @@ class TransportLayerServer(JsonResource): self.clock = hs.get_clock() self.servlet_groups = servlet_groups - super(TransportLayerServer, self).__init__(hs, canonical_json=False) + super().__init__(hs, canonical_json=False) self.authenticator = Authenticator(hs) self.ratelimiter = hs.get_federation_ratelimiter() @@ -376,9 +376,7 @@ class FederationSendServlet(BaseFederationServlet): RATELIMIT = False def __init__(self, handler, server_name, **kwargs): - super(FederationSendServlet, self).__init__( - handler, server_name=server_name, **kwargs - ) + super().__init__(handler, server_name=server_name, **kwargs) self.server_name = server_name # This is when someone is trying to send us a bunch of data. @@ -773,9 +771,7 @@ class PublicRoomList(BaseFederationServlet): PATH = "/publicRooms" def __init__(self, handler, authenticator, ratelimiter, server_name, allow_access): - super(PublicRoomList, self).__init__( - handler, authenticator, ratelimiter, server_name - ) + super().__init__(handler, authenticator, ratelimiter, server_name) self.allow_access = allow_access async def on_GET(self, origin, content, query): diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 1dd20ee4e1..e5f85b472d 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -336,7 +336,7 @@ class GroupsServerWorkerHandler: class GroupsServerHandler(GroupsServerWorkerHandler): def __init__(self, hs): - super(GroupsServerHandler, self).__init__(hs) + super().__init__(hs) # Ensure attestations get renewed hs.get_groups_attestation_renewer() diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 5e5a64037d..dd981c597e 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -28,7 +28,7 @@ logger = logging.getLogger(__name__) class AdminHandler(BaseHandler): def __init__(self, hs): - super(AdminHandler, self).__init__(hs) + super().__init__(hs) self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 4e658d9a48..0322b60cfc 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -145,7 +145,7 @@ class AuthHandler(BaseHandler): Args: hs (synapse.server.HomeServer): """ - super(AuthHandler, self).__init__(hs) + super().__init__(hs) self.checkers = {} # type: Dict[str, UserInteractiveAuthChecker] for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: diff --git a/synapse/handlers/deactivate_account.py 
b/synapse/handlers/deactivate_account.py index 25169157c1..0635ad5708 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -29,7 +29,7 @@ class DeactivateAccountHandler(BaseHandler): """Handler which deals with deactivating user accounts.""" def __init__(self, hs): - super(DeactivateAccountHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4b0a4f96cc..55a9787439 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -48,7 +48,7 @@ MAX_DEVICE_DISPLAY_NAME_LEN = 100 class DeviceWorkerHandler(BaseHandler): def __init__(self, hs): - super(DeviceWorkerHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.state = hs.get_state_handler() @@ -251,7 +251,7 @@ class DeviceWorkerHandler(BaseHandler): class DeviceHandler(DeviceWorkerHandler): def __init__(self, hs): - super(DeviceHandler, self).__init__(hs) + super().__init__(hs) self.federation_sender = hs.get_federation_sender() diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 46826eb784..62aa9a2da8 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): def __init__(self, hs): - super(DirectoryHandler, self).__init__(hs) + super().__init__(hs) self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index fdce54c5c3..0875b74ea8 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -37,7 +37,7 @@ logger = logging.getLogger(__name__) class EventStreamHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(EventStreamHandler, self).__init__(hs) + super().__init__(hs) self.clock = hs.get_clock() @@ -142,7 +142,7 @@ class EventStreamHandler(BaseHandler): class EventHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(EventHandler, self).__init__(hs) + super().__init__(hs) self.storage = hs.get_storage() async def get_event( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 262901363f..96eeff7b1b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -115,7 +115,7 @@ class FederationHandler(BaseHandler): """ def __init__(self, hs): - super(FederationHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 44df567983..9684e60fc8 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -240,7 +240,7 @@ class GroupsLocalWorkerHandler: class GroupsLocalHandler(GroupsLocalWorkerHandler): def __init__(self, hs): - super(GroupsLocalHandler, self).__init__(hs) + super().__init__(hs) # Ensure attestations get renewed hs.get_groups_attestation_renewer() diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 0ce6ddfbe4..ab15570f7a 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -45,7 +45,7 @@ id_server_scheme = "https://" class IdentityHandler(BaseHandler): def __init__(self, hs): - super(IdentityHandler, self).__init__(hs) + super().__init__(hs) self.http_client = SimpleHttpClient(hs) # We create a blacklisting instance of SimpleHttpClient for contacting identity diff 
--git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index ba4828c713..8cd7eb22a3 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -42,7 +42,7 @@ logger = logging.getLogger(__name__) class InitialSyncHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(InitialSyncHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.state = hs.get_state_handler() self.clock = hs.get_clock() diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 0cb8fad89a..5453e6dfc8 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -44,7 +44,7 @@ class BaseProfileHandler(BaseHandler): """ def __init__(self, hs): - super(BaseProfileHandler, self).__init__(hs) + super().__init__(hs) self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -369,7 +369,7 @@ class MasterProfileHandler(BaseProfileHandler): PROFILE_UPDATE_EVERY_MS = 24 * 60 * 60 * 1000 def __init__(self, hs): - super(MasterProfileHandler, self).__init__(hs) + super().__init__(hs) assert hs.config.worker_app is None diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py index e3b528d271..c32f314a1c 100644 --- a/synapse/handlers/read_marker.py +++ b/synapse/handlers/read_marker.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) class ReadMarkerHandler(BaseHandler): def __init__(self, hs): - super(ReadMarkerHandler, self).__init__(hs) + super().__init__(hs) self.server_name = hs.config.server_name self.store = hs.get_datastore() self.read_marker_linearizer = Linearizer(name="read_marker") diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index bdd8e52edd..7225923757 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -23,7 +23,7 @@ logger = logging.getLogger(__name__) class ReceiptsHandler(BaseHandler): def __init__(self, hs): - super(ReceiptsHandler, self).__init__(hs) + super().__init__(hs) self.server_name = hs.config.server_name self.store = hs.get_datastore() diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index cde2dbca92..538f4b2a61 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -42,7 +42,7 @@ class RegistrationHandler(BaseHandler): Args: hs (synapse.server.HomeServer): """ - super(RegistrationHandler, self).__init__(hs) + super().__init__(hs) self.hs = hs self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index eeade6ad3f..11bf146bed 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -70,7 +70,7 @@ FIVE_MINUTES_IN_MS = 5 * 60 * 1000 class RoomCreationHandler(BaseHandler): def __init__(self, hs: "HomeServer"): - super(RoomCreationHandler, self).__init__(hs) + super().__init__(hs) self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index 5dd7b28391..4a13c8e912 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ -38,7 +38,7 @@ EMPTY_THIRD_PARTY_ID = ThirdPartyInstanceID(None, None) class RoomListHandler(BaseHandler): def __init__(self, hs): - super(RoomListHandler, self).__init__(hs) + super().__init__(hs) self.enable_room_list_search = hs.config.enable_room_list_search self.response_cache = ResponseCache(hs, "room_list") self.remote_response_cache = ResponseCache( diff --git 
a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index e7f34737c6..f2e88f6a5b 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -30,7 +30,7 @@ logger = logging.getLogger(__name__) class RoomMemberWorkerHandler(RoomMemberHandler): def __init__(self, hs): - super(RoomMemberWorkerHandler, self).__init__(hs) + super().__init__(hs) self._remote_join_client = ReplRemoteJoin.make_client(hs) self._remote_reject_client = ReplRejectInvite.make_client(hs) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index d58f9788c5..6a76c20d79 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -32,7 +32,7 @@ logger = logging.getLogger(__name__) class SearchHandler(BaseHandler): def __init__(self, hs): - super(SearchHandler, self).__init__(hs) + super().__init__(hs) self._event_serializer = hs.get_event_client_serializer() self.storage = hs.get_storage() self.state_store = self.storage.state diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index 4d245b618b..a5d67f828f 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -27,7 +27,7 @@ class SetPasswordHandler(BaseHandler): """Handler which deals with changing user account passwords""" def __init__(self, hs): - super(SetPasswordHandler, self).__init__(hs) + super().__init__(hs) self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() self._password_policy_handler = hs.get_password_policy_handler() diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index e21f8dbc58..79393c8829 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -37,7 +37,7 @@ class UserDirectoryHandler(StateDeltasHandler): """ def __init__(self, hs): - super(UserDirectoryHandler, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.state = hs.get_state_handler() diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 3880ce0d94..8eb3638591 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -27,7 +27,7 @@ class RequestTimedOutError(SynapseError): """Exception representing timeout of an outbound request""" def __init__(self): - super(RequestTimedOutError, self).__init__(504, "Timed out") + super().__init__(504, "Timed out") def cancelled_to_request_timed_out_error(value, timeout): diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index d736ad5b9b..11f60a77f7 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -30,7 +30,7 @@ class LogFormatter(logging.Formatter): """ def __init__(self, *args, **kwargs): - super(LogFormatter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def formatException(self, ei): sio = StringIO() diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 026854b4c7..7b9c657456 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -107,7 +107,7 @@ class _LogContextScope(Scope): finish_on_close (Boolean): if True finish the span when the scope is closed """ - super(_LogContextScope, self).__init__(manager, span) + super().__init__(manager, span) self.logcontext = logcontext self._finish_on_close = finish_on_close self._enter_logcontext = enter_logcontext @@ -120,9 +120,9 @@ class _LogContextScope(Scope): def __exit__(self, type, value, traceback): if type == 
twisted.internet.defer._DefGen_Return: - super(_LogContextScope, self).__exit__(None, None, None) + super().__exit__(None, None, None) else: - super(_LogContextScope, self).__exit__(type, value, traceback) + super().__exit__(type, value, traceback) if self._enter_logcontext: self.logcontext.__exit__(type, value, traceback) else: # the logcontext existed before the creation of the scope diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index edf45dc599..5a437f9810 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -16,4 +16,4 @@ class PusherConfigException(Exception): def __init__(self, msg): - super(PusherConfigException, self).__init__(msg) + super().__init__(msg) diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 20f3ba76c0..807b85d2e1 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -53,7 +53,7 @@ class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint): CACHE = False def __init__(self, hs): - super(ReplicationUserDevicesResyncRestServlet, self).__init__(hs) + super().__init__(hs) self.device_list_updater = hs.get_device_handler().device_list_updater self.store = hs.get_datastore() diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 5c8be747e1..5393b9a9e7 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -57,7 +57,7 @@ class ReplicationFederationSendEventsRestServlet(ReplicationEndpoint): PATH_ARGS = () def __init__(self, hs): - super(ReplicationFederationSendEventsRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.storage = hs.get_storage() @@ -150,7 +150,7 @@ class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): PATH_ARGS = ("edu_type",) def __init__(self, hs): - super(ReplicationFederationSendEduRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -193,7 +193,7 @@ class ReplicationGetQueryRestServlet(ReplicationEndpoint): CACHE = False def __init__(self, hs): - super(ReplicationGetQueryRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -236,7 +236,7 @@ class ReplicationCleanRoomRestServlet(ReplicationEndpoint): PATH_ARGS = ("room_id",) def __init__(self, hs): - super(ReplicationCleanRoomRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index fb326bb869..4c81e2d784 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -32,7 +32,7 @@ class RegisterDeviceReplicationServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(RegisterDeviceReplicationServlet, self).__init__(hs) + super().__init__(hs) self.registration_handler = hs.get_registration_handler() @staticmethod diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 08095fdf7d..30680baee8 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -45,7 +45,7 @@ class ReplicationRemoteJoinRestServlet(ReplicationEndpoint): PATH_ARGS = ("room_id", "user_id") def __init__(self, hs): - super(ReplicationRemoteJoinRestServlet, self).__init__(hs) + super().__init__(hs) self.federation_handler = hs.get_handlers().federation_handler self.store = hs.get_datastore() @@ 
-107,7 +107,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): PATH_ARGS = ("invite_event_id",) def __init__(self, hs: "HomeServer"): - super(ReplicationRemoteRejectInviteRestServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -168,7 +168,7 @@ class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): CACHE = False # No point caching as should return instantly. def __init__(self, hs): - super(ReplicationUserJoinedLeftRoomRestServlet, self).__init__(hs) + super().__init__(hs) self.registeration_handler = hs.get_registration_handler() self.store = hs.get_datastore() diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index a02b27474d..7b12ec9060 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -29,7 +29,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(ReplicationRegisterServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.registration_handler = hs.get_registration_handler() @@ -104,7 +104,7 @@ class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): PATH_ARGS = ("user_id",) def __init__(self, hs): - super(ReplicationPostRegisterActionsServlet, self).__init__(hs) + super().__init__(hs) self.store = hs.get_datastore() self.registration_handler = hs.get_registration_handler() diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index f13d452426..9a3a694d5d 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -52,7 +52,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint): PATH_ARGS = ("event_id",) def __init__(self, hs): - super(ReplicationSendEventRestServlet, self).__init__(hs) + super().__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() self.store = hs.get_datastore() diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 60f2e1245f..d25fa49e1a 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) class BaseSlavedStore(CacheInvalidationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(BaseSlavedStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if isinstance(self.database_engine, PostgresEngine): self._cache_id_gen = MultiWriterIdGenerator( db_conn, diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index bb66ba9b80..4268565fc8 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -34,7 +34,7 @@ class SlavedAccountDataStore(TagsWorkerStore, AccountDataWorkerStore, BaseSlaved ], ) - super(SlavedAccountDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_account_data_stream_id(self): return self._account_data_id_gen.get_current_token() diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index a6fdedde63..1f8dafe7ea 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -22,7 +22,7 @@ from ._base import BaseSlavedStore class SlavedClientIpStore(BaseSlavedStore): def 
__init__(self, database: DatabasePool, db_conn, hs): - super(SlavedClientIpStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.client_ip_last_seen = Cache( name="client_ip_last_seen", keylen=4, max_entries=50000 diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 533d927701..5b045bed02 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -24,7 +24,7 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedDeviceInboxStore(DeviceInboxWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedDeviceInboxStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._device_inbox_id_gen = SlavedIdTracker( db_conn, "device_inbox", "stream_id" ) diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 3b788c9625..e0d86240dd 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -24,7 +24,7 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedDeviceStore(EndToEndKeyWorkerStore, DeviceWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedDeviceStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index da1cc836cf..fbffe6d85c 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -56,7 +56,7 @@ class SlavedEventStore( BaseSlavedStore, ): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedEventStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) events_max = self._stream_id_gen.get_current_token() curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 2562b6fc38..6a23252861 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -21,7 +21,7 @@ from ._base import BaseSlavedStore class SlavedFilteringStore(BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedFilteringStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Filters are immutable so this cache doesn't need to be expired get_user_filter = FilteringStore.__dict__["get_user_filter"] diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 567b4a5cc1..30955bcbfe 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -23,7 +23,7 @@ from synapse.util.caches.stream_change_cache import StreamChangeCache class SlavedGroupServerStore(GroupServerWorkerStore, BaseSlavedStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SlavedGroupServerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.hs = hs diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 025f6f6be8..55620c03d8 100644 --- a/synapse/replication/slave/storage/presence.py +++ 
b/synapse/replication/slave/storage/presence.py
@@ -25,7 +25,7 @@ from ._slaved_id_tracker import SlavedIdTracker

 class SlavedPresenceStore(BaseSlavedStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
-        super(SlavedPresenceStore, self).__init__(database, db_conn, hs)
+        super().__init__(database, db_conn, hs)
         self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id")

         self._presence_on_startup = self._get_active_presence(db_conn)  # type: ignore
diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py
index 9da218bfe8..c418730ba8 100644
--- a/synapse/replication/slave/storage/pushers.py
+++ b/synapse/replication/slave/storage/pushers.py
@@ -24,7 +24,7 @@ from ._slaved_id_tracker import SlavedIdTracker

 class SlavedPusherStore(PusherWorkerStore, BaseSlavedStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
-        super(SlavedPusherStore, self).__init__(database, db_conn, hs)
+        super().__init__(database, db_conn, hs)
         self._pushers_id_gen = SlavedIdTracker(
             db_conn, "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")]
         )
diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py
index 5c2986e050..6195917376 100644
--- a/synapse/replication/slave/storage/receipts.py
+++ b/synapse/replication/slave/storage/receipts.py
@@ -30,7 +30,7 @@ class SlavedReceiptsStore(ReceiptsWorkerStore, BaseSlavedStore):
             db_conn, "receipts_linearized", "stream_id"
         )

-        super(SlavedReceiptsStore, self).__init__(database, db_conn, hs)
+        super().__init__(database, db_conn, hs)

     def get_max_receipt_stream_id(self):
         return self._receipts_id_gen.get_current_token()
diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py
index 80ae803ad9..109ac6bea1 100644
--- a/synapse/replication/slave/storage/room.py
+++ b/synapse/replication/slave/storage/room.py
@@ -23,7 +23,7 @@ from ._slaved_id_tracker import SlavedIdTracker

 class RoomStore(RoomWorkerStore, BaseSlavedStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
-        super(RoomStore, self).__init__(database, db_conn, hs)
+        super().__init__(database, db_conn, hs)
         self._public_room_id_gen = SlavedIdTracker(
             db_conn, "public_room_list_stream", "stream_id"
         )
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 1f609f158c..54dccd15a6 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -345,7 +345,7 @@ class PushRulesStream(Stream):
     def __init__(self, hs):
         self.store = hs.get_datastore()

-        super(PushRulesStream, self).__init__(
+        super().__init__(
             hs.get_instance_name(),
             self._current_token,
             self.store.get_all_push_rule_updates,
diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py
index 4670d7160d..a163863322 100644
--- a/synapse/rest/admin/devices.py
+++ b/synapse/rest/admin/devices.py
@@ -36,7 +36,7 @@ class DeviceRestServlet(RestServlet):
     )

     def __init__(self, hs):
-        super(DeviceRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.auth = hs.get_auth()
         self.device_handler = hs.get_device_handler()
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py
index b210015173..faabeeb91c 100644
--- a/synapse/rest/client/v1/directory.py
+++ b/synapse/rest/client/v1/directory.py
@@ -40,7 +40,7 @@ class ClientDirectoryServer(RestServlet):
     PATTERNS = client_patterns("/directory/room/(?P<room_alias>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(ClientDirectoryServer, self).__init__()
+        super().__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
         self.auth = hs.get_auth()
@@ -120,7 +120,7 @@ class ClientDirectoryListServer(RestServlet):
     PATTERNS = client_patterns("/directory/list/room/(?P<room_id>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(ClientDirectoryListServer, self).__init__()
+        super().__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
         self.auth = hs.get_auth()
@@ -160,7 +160,7 @@ class ClientAppserviceDirectoryListServer(RestServlet):
     )

     def __init__(self, hs):
-        super(ClientAppserviceDirectoryListServer, self).__init__()
+        super().__init__()
         self.store = hs.get_datastore()
         self.handlers = hs.get_handlers()
         self.auth = hs.get_auth()
diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py
index 25effd0261..985d994f6b 100644
--- a/synapse/rest/client/v1/events.py
+++ b/synapse/rest/client/v1/events.py
@@ -30,7 +30,7 @@ class EventStreamRestServlet(RestServlet):
     DEFAULT_LONGPOLL_TIME_MS = 30000

     def __init__(self, hs):
-        super(EventStreamRestServlet, self).__init__()
+        super().__init__()
         self.event_stream_handler = hs.get_event_stream_handler()
         self.auth = hs.get_auth()

@@ -74,7 +74,7 @@ class EventRestServlet(RestServlet):
     PATTERNS = client_patterns("/events/(?P<event_id>[^/]*)$", v1=True)

     def __init__(self, hs):
-        super(EventRestServlet, self).__init__()
+        super().__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
         self.auth = hs.get_auth()
diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py
index 910b3b4eeb..d7042786ce 100644
--- a/synapse/rest/client/v1/initial_sync.py
+++ b/synapse/rest/client/v1/initial_sync.py
@@ -24,7 +24,7 @@ class InitialSyncRestServlet(RestServlet):
     PATTERNS = client_patterns("/initialSync$", v1=True)

     def __init__(self, hs):
-        super(InitialSyncRestServlet, self).__init__()
+        super().__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
         self.auth = hs.get_auth()

diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index dd8cdc0d9f..250b03a025 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -48,7 +48,7 @@ class LoginRestServlet(RestServlet):
     APPSERVICE_TYPE = "uk.half-shot.msc2778.login.application_service"

     def __init__(self, hs):
-        super(LoginRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs

         # JWT configuration variables.
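As a minimal sketch of what this mechanical rewrite produces (toy classes, not Synapse code): in Python 3 a zero-argument super() inside a method resolves exactly as the old explicit spelling did, because the compiler binds the enclosing class to an implicit __class__ closure cell.

    class Base:
        def __init__(self):
            print("Base.__init__ ran")

    class Child(Base):
        def __init__(self):
            # Before the rewrite (Python 2-compatible spelling):
            #   super(Child, self).__init__()
            # After the rewrite; equivalent, because __class__ is
            # bound to Child inside this method body:
            super().__init__()

    Child()  # prints "Base.__init__ ran"

The blind sed rewrite is safe here because every super(Foo, self) call in the tree sits inside a method defined in a class body, which is the only place the implicit __class__ cell (and hence zero-argument super()) is available.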
@@ -429,7 +429,7 @@ class CasTicketServlet(RestServlet):
     PATTERNS = client_patterns("/login/cas/ticket", v1=True)

     def __init__(self, hs):
-        super(CasTicketServlet, self).__init__()
+        super().__init__()
         self._cas_handler = hs.get_cas_handler()

     async def on_GET(self, request: SynapseRequest) -> None:
diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py
index b0c30b65be..f792b50cdc 100644
--- a/synapse/rest/client/v1/logout.py
+++ b/synapse/rest/client/v1/logout.py
@@ -25,7 +25,7 @@ class LogoutRestServlet(RestServlet):
     PATTERNS = client_patterns("/logout$", v1=True)

     def __init__(self, hs):
-        super(LogoutRestServlet, self).__init__()
+        super().__init__()
         self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
@@ -53,7 +53,7 @@ class LogoutAllRestServlet(RestServlet):
     PATTERNS = client_patterns("/logout/all$", v1=True)

     def __init__(self, hs):
-        super(LogoutAllRestServlet, self).__init__()
+        super().__init__()
         self.auth = hs.get_auth()
         self._auth_handler = hs.get_auth_handler()
         self._device_handler = hs.get_device_handler()
diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py
index 970fdd5834..79d8e3057f 100644
--- a/synapse/rest/client/v1/presence.py
+++ b/synapse/rest/client/v1/presence.py
@@ -30,7 +30,7 @@ class PresenceStatusRestServlet(RestServlet):
     PATTERNS = client_patterns("/presence/(?P<user_id>[^/]*)/status", v1=True)

     def __init__(self, hs):
-        super(PresenceStatusRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.presence_handler = hs.get_presence_handler()
         self.clock = hs.get_clock()
diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py
index e7fe50ed72..b686cd671f 100644
--- a/synapse/rest/client/v1/profile.py
+++ b/synapse/rest/client/v1/profile.py
@@ -25,7 +25,7 @@ class ProfileDisplaynameRestServlet(RestServlet):
     PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/displayname", v1=True)

     def __init__(self, hs):
-        super(ProfileDisplaynameRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.profile_handler = hs.get_profile_handler()
         self.auth = hs.get_auth()
@@ -73,7 +73,7 @@ class ProfileAvatarURLRestServlet(RestServlet):
     PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)/avatar_url", v1=True)

     def __init__(self, hs):
-        super(ProfileAvatarURLRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.profile_handler = hs.get_profile_handler()
         self.auth = hs.get_auth()
@@ -124,7 +124,7 @@ class ProfileRestServlet(RestServlet):
     PATTERNS = client_patterns("/profile/(?P<user_id>[^/]*)", v1=True)

     def __init__(self, hs):
-        super(ProfileRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.profile_handler = hs.get_profile_handler()
         self.auth = hs.get_auth()
diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py
index ddf8ed5e9c..f9eecb7cf5 100644
--- a/synapse/rest/client/v1/push_rule.py
+++ b/synapse/rest/client/v1/push_rule.py
@@ -38,7 +38,7 @@ class PushRuleRestServlet(RestServlet):
     )

     def __init__(self, hs):
-        super(PushRuleRestServlet, self).__init__()
+        super().__init__()
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
         self.notifier = hs.get_notifier()
diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py
index 5f65cb7d83..28dabf1c7a 100644
--- a/synapse/rest/client/v1/pusher.py
+++ b/synapse/rest/client/v1/pusher.py
@@ -44,7 +44,7 @@ class PushersRestServlet(RestServlet):
     PATTERNS = client_patterns("/pushers$", v1=True)

     def __init__(self, hs):
-        super(PushersRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.auth = hs.get_auth()

@@ -68,7 +68,7 @@ class PushersSetRestServlet(RestServlet):
     PATTERNS = client_patterns("/pushers/set$", v1=True)

     def __init__(self, hs):
-        super(PushersSetRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.auth = hs.get_auth()
         self.notifier = hs.get_notifier()
@@ -153,7 +153,7 @@ class PushersRemoveRestServlet(RestServlet):
     SUCCESS_HTML = b"<html><body>You have been unsubscribed</body></html>"

     def __init__(self, hs):
-        super(PushersRemoveRestServlet, self).__init__()
+        super().__init__()
         self.hs = hs
         self.notifier = hs.get_notifier()
         self.auth = hs.get_auth()
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 84baf3d59b..7e64a2e0fe 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -57,7 +57,7 @@ logger = logging.getLogger(__name__)

 class TransactionRestServlet(RestServlet):
     def __init__(self, hs):
-        super(TransactionRestServlet, self).__init__()
+        super().__init__()
         self.txns = HttpTransactionCache(hs)


@@ -65,7 +65,7 @@ class RoomCreateRestServlet(TransactionRestServlet):
     # No PATTERN; we have custom dispatch rules here

     def __init__(self, hs):
-        super(RoomCreateRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self._room_creation_handler = hs.get_room_creation_handler()
         self.auth = hs.get_auth()

@@ -111,7 +111,7 @@ class RoomCreateRestServlet(TransactionRestServlet):
 # TODO: Needs unit testing for generic events
 class RoomStateEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(RoomStateEventRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.handlers = hs.get_handlers()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.room_member_handler = hs.get_room_member_handler()
@@ -229,7 +229,7 @@ class RoomStateEventRestServlet(TransactionRestServlet):
 # TODO: Needs unit testing for generic events + feedback
 class RoomSendEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(RoomSendEventRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.event_creation_handler = hs.get_event_creation_handler()
         self.auth = hs.get_auth()

@@ -280,7 +280,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
 # TODO: Needs unit testing for room ID + alias joins
 class JoinRoomAliasServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(JoinRoomAliasServlet, self).__init__(hs)
+        super().__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
         self.auth = hs.get_auth()

@@ -343,7 +343,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
     PATTERNS = client_patterns("/publicRooms$", v1=True)

     def __init__(self, hs):
-        super(PublicRoomListRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.hs = hs
         self.auth = hs.get_auth()

@@ -448,7 +448,7 @@ class RoomMemberListRestServlet(RestServlet):
     PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True)

     def __init__(self, hs):
-        super(RoomMemberListRestServlet, self).__init__()
+        super().__init__()
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

@@ -499,7 +499,7 @@ class JoinedRoomMemberListRestServlet(RestServlet):
     PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True)

     def __init__(self, hs):
-        super(JoinedRoomMemberListRestServlet, self).__init__()
+        super().__init__()
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

@@ -518,7 +518,7 @@ class RoomMessageListRestServlet(RestServlet):
     PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True)

     def __init__(self, hs):
-        super(RoomMessageListRestServlet, self).__init__()
+        super().__init__()
         self.pagination_handler = hs.get_pagination_handler()
         self.auth = hs.get_auth()

@@ -557,7 +557,7 @@ class RoomStateRestServlet(RestServlet):
     PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True)

     def __init__(self, hs):
-        super(RoomStateRestServlet, self).__init__()
+        super().__init__()
         self.message_handler = hs.get_message_handler()
         self.auth = hs.get_auth()

@@ -577,7 +577,7 @@ class RoomInitialSyncRestServlet(RestServlet):
     PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True)

     def __init__(self, hs):
-        super(RoomInitialSyncRestServlet, self).__init__()
+        super().__init__()
         self.initial_sync_handler = hs.get_initial_sync_handler()
         self.auth = hs.get_auth()

@@ -596,7 +596,7 @@ class RoomEventServlet(RestServlet):
     )

     def __init__(self, hs):
-        super(RoomEventServlet, self).__init__()
+        super().__init__()
         self.clock = hs.get_clock()
         self.event_handler = hs.get_event_handler()
         self._event_serializer = hs.get_event_client_serializer()
@@ -628,7 +628,7 @@ class RoomEventContextServlet(RestServlet):
     )

     def __init__(self, hs):
-        super(RoomEventContextServlet, self).__init__()
+        super().__init__()
         self.clock = hs.get_clock()
         self.room_context_handler = hs.get_room_context_handler()
         self._event_serializer = hs.get_event_client_serializer()
@@ -675,7 +675,7 @@ class RoomEventContextServlet(RestServlet):

 class RoomForgetRestServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(RoomForgetRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
         self.auth = hs.get_auth()

@@ -701,7 +701,7 @@ class RoomForgetRestServlet(TransactionRestServlet):
 # TODO: Needs unit testing
 class RoomMembershipRestServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(RoomMembershipRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.room_member_handler = hs.get_room_member_handler()
         self.auth = hs.get_auth()

@@ -792,7 +792,7 @@ class RoomMembershipRestServlet(TransactionRestServlet):

 class RoomRedactEventRestServlet(TransactionRestServlet):
     def __init__(self, hs):
-        super(RoomRedactEventRestServlet, self).__init__(hs)
+        super().__init__(hs)
         self.handlers = hs.get_handlers()
         self.event_creation_handler = hs.get_event_creation_handler()
         self.auth = hs.get_auth()
@@ -841,7 +841,7 @@ class RoomTypingRestServlet(RestServlet):
     )

     def __init__(self, hs):
-        super(RoomTypingRestServlet, self).__init__()
+        super().__init__()
         self.presence_handler = hs.get_presence_handler()
         self.typing_handler = hs.get_typing_handler()
         self.auth = hs.get_auth()
@@ -914,7 +914,7 @@ class SearchRestServlet(RestServlet):
     PATTERNS = client_patterns("/search$", v1=True)

     def __init__(self, hs):
-        super(SearchRestServlet, self).__init__()
+        super().__init__()
         self.handlers = hs.get_handlers()
         self.auth = hs.get_auth()

@@ -935,7 +935,7 @@ class JoinedRoomsRestServlet(RestServlet):
     PATTERNS = client_patterns("/joined_rooms$", v1=True)

     def __init__(self, hs):
-        super(JoinedRoomsRestServlet, self).__init__()
+        super().__init__()
         self.store = hs.get_datastore()
         self.auth = hs.get_auth()

diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py
index 50277c6cf6..b8d491ca5c 100644
--- a/synapse/rest/client/v1/voip.py
+++ b/synapse/rest/client/v1/voip.py
@@ -25,7 +25,7 @@ class VoipRestServlet(RestServlet):
     PATTERNS = client_patterns("/voip/turnServer$", v1=True)

     def __init__(self, hs):
-
super(VoipRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ade97a6708..c3ce0f6259 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -52,7 +52,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/password/email/requestToken$") def __init__(self, hs): - super(EmailPasswordRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.datastore = hs.get_datastore() self.config = hs.config @@ -156,7 +156,7 @@ class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") def __init__(self, hs): - super(PasswordRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -282,7 +282,7 @@ class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_patterns("/account/deactivate$") def __init__(self, hs): - super(DeactivateAccountRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -330,7 +330,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): - super(EmailThreepidRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler @@ -427,7 +427,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): def __init__(self, hs): self.hs = hs - super(MsisdnThreepidRequestTokenRestServlet, self).__init__() + super().__init__() self.store = self.hs.get_datastore() self.identity_handler = hs.get_handlers().identity_handler @@ -606,7 +606,7 @@ class ThreepidRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid$") def __init__(self, hs): - super(ThreepidRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -662,7 +662,7 @@ class ThreepidAddRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/add$") def __init__(self, hs): - super(ThreepidAddRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -713,7 +713,7 @@ class ThreepidBindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/bind$") def __init__(self, hs): - super(ThreepidBindRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -742,7 +742,7 @@ class ThreepidUnbindRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/unbind$") def __init__(self, hs): - super(ThreepidUnbindRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.auth = hs.get_auth() @@ -773,7 +773,7 @@ class ThreepidDeleteRestServlet(RestServlet): PATTERNS = client_patterns("/account/3pid/delete$") def __init__(self, hs): - super(ThreepidDeleteRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() @@ -852,7 +852,7 @@ class WhoamiRestServlet(RestServlet): PATTERNS = client_patterns("/account/whoami$") def __init__(self, hs): - super(WhoamiRestServlet, 
self).__init__() + super().__init__() self.auth = hs.get_auth() async def on_GET(self, request): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index c1d4cd0caf..87a5b1b86b 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -34,7 +34,7 @@ class AccountDataServlet(RestServlet): ) def __init__(self, hs): - super(AccountDataServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() @@ -86,7 +86,7 @@ class RoomAccountDataServlet(RestServlet): ) def __init__(self, hs): - super(RoomAccountDataServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index d06336ceea..bd7f9ae203 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -32,7 +32,7 @@ class AccountValidityRenewServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(AccountValidityRenewServlet, self).__init__() + super().__init__() self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() @@ -67,7 +67,7 @@ class AccountValiditySendMailServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(AccountValiditySendMailServlet, self).__init__() + super().__init__() self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 8e585e9153..097538f968 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -124,7 +124,7 @@ class AuthRestServlet(RestServlet): PATTERNS = client_patterns(r"/auth/(?P<stagetype>[\w\.]*)/fallback/web") def __init__(self, hs): - super(AuthRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index fe9d019c44..76879ac559 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -32,7 +32,7 @@ class CapabilitiesRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(CapabilitiesRestServlet, self).__init__() + super().__init__() self.hs = hs self.config = hs.config self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index c0714fcfb1..7e174de692 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -35,7 +35,7 @@ class DevicesRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(DevicesRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -57,7 +57,7 @@ class DeleteDevicesRestServlet(RestServlet): PATTERNS = client_patterns("/delete_devices") def __init__(self, hs): - super(DeleteDevicesRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -102,7 +102,7 @@ class DeviceRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(DeviceRestServlet, self).__init__() + 
super().__init__() self.hs = hs self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index b28da017cd..7cc692643b 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -28,7 +28,7 @@ class GetFilterRestServlet(RestServlet): PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)") def __init__(self, hs): - super(GetFilterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.filtering = hs.get_filtering() @@ -64,7 +64,7 @@ class CreateFilterRestServlet(RestServlet): PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/filter") def __init__(self, hs): - super(CreateFilterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.filtering = hs.get_filtering() diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 13ecf7005d..a3bb095c2d 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -32,7 +32,7 @@ class GroupServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/profile$") def __init__(self, hs): - super(GroupServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -66,7 +66,7 @@ class GroupSummaryServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/summary$") def __init__(self, hs): - super(GroupSummaryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -97,7 +97,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): ) def __init__(self, hs): - super(GroupSummaryRoomsCatServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -137,7 +137,7 @@ class GroupCategoryServlet(RestServlet): ) def __init__(self, hs): - super(GroupCategoryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -181,7 +181,7 @@ class GroupCategoriesServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/categories/$") def __init__(self, hs): - super(GroupCategoriesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -204,7 +204,7 @@ class GroupRoleServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/(?P<role_id>[^/]+)$") def __init__(self, hs): - super(GroupRoleServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -248,7 +248,7 @@ class GroupRolesServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/roles/$") def __init__(self, hs): - super(GroupRolesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -279,7 +279,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): ) def __init__(self, hs): - super(GroupSummaryUsersRoleServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -317,7 +317,7 @@ class GroupRoomServlet(RestServlet): PATTERNS = 
client_patterns("/groups/(?P<group_id>[^/]*)/rooms$") def __init__(self, hs): - super(GroupRoomServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -343,7 +343,7 @@ class GroupUsersServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/users$") def __init__(self, hs): - super(GroupUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -366,7 +366,7 @@ class GroupInvitedUsersServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/invited_users$") def __init__(self, hs): - super(GroupInvitedUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -389,7 +389,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/settings/m.join_policy$") def __init__(self, hs): - super(GroupSettingJoinPolicyServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.groups_handler = hs.get_groups_local_handler() @@ -413,7 +413,7 @@ class GroupCreateServlet(RestServlet): PATTERNS = client_patterns("/create_group$") def __init__(self, hs): - super(GroupCreateServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -444,7 +444,7 @@ class GroupAdminRoomsServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminRoomsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -481,7 +481,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminRoomsConfigServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -507,7 +507,7 @@ class GroupAdminUsersInviteServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminUsersInviteServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -536,7 +536,7 @@ class GroupAdminUsersKickServlet(RestServlet): ) def __init__(self, hs): - super(GroupAdminUsersKickServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -560,7 +560,7 @@ class GroupSelfLeaveServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/leave$") def __init__(self, hs): - super(GroupSelfLeaveServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -584,7 +584,7 @@ class GroupSelfJoinServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/join$") def __init__(self, hs): - super(GroupSelfJoinServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() @@ -608,7 +608,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/accept_invite$") def __init__(self, hs): - super(GroupSelfAcceptInviteServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = 
hs.get_groups_local_handler() @@ -632,7 +632,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): PATTERNS = client_patterns("/groups/(?P<group_id>[^/]*)/self/update_publicity$") def __init__(self, hs): - super(GroupSelfUpdatePublicityServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -655,7 +655,7 @@ class PublicisedGroupsForUserServlet(RestServlet): PATTERNS = client_patterns("/publicised_groups/(?P<user_id>[^/]*)$") def __init__(self, hs): - super(PublicisedGroupsForUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -676,7 +676,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): PATTERNS = client_patterns("/publicised_groups$") def __init__(self, hs): - super(PublicisedGroupsForUsersServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -700,7 +700,7 @@ class GroupsForUserServlet(RestServlet): PATTERNS = client_patterns("/joined_groups$") def __init__(self, hs): - super(GroupsForUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.clock = hs.get_clock() self.groups_handler = hs.get_groups_local_handler() diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 24bb090822..7abd6ff333 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -64,7 +64,7 @@ class KeyUploadServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(KeyUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -147,7 +147,7 @@ class KeyQueryServlet(RestServlet): Args: hs (synapse.server.HomeServer): """ - super(KeyQueryServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -177,7 +177,7 @@ class KeyChangesServlet(RestServlet): Args: hs (synapse.server.HomeServer): """ - super(KeyChangesServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() @@ -222,7 +222,7 @@ class OneTimeKeyServlet(RestServlet): PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs): - super(OneTimeKeyServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -250,7 +250,7 @@ class SigningKeyUploadServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(SigningKeyUploadServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() @@ -308,7 +308,7 @@ class SignaturesUploadServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(SignaturesUploadServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index aa911d75ee..87063ec8b1 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -27,7 +27,7 @@ class NotificationsServlet(RestServlet): PATTERNS = client_patterns("/notifications$") def __init__(self, hs): - super(NotificationsServlet, self).__init__() + super().__init__() self.store = hs.get_datastore() self.auth = hs.get_auth() self.clock = hs.get_clock() 
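The servlet hunks above (and those that follow) all make the same mechanical substitution, so it is worth recording once why it is safe: a zero-argument super() call is compiled against the implicit __class__ cell of the enclosing class body, which makes it exactly equivalent to the old explicit super(ThisClass, self) form, while no longer needing to be edited when a class is renamed. A minimal sketch of the equivalence, using hypothetical class names rather than anything from the Synapse tree:

    class RestServletBase:
        def __init__(self):
            print("base initialised")

    class ExampleRestServlet(RestServletBase):
        def __init__(self, hs):
            # Equivalent to: super(ExampleRestServlet, self).__init__()
            # The compiler supplies ExampleRestServlet via the implicit
            # __class__ cell, so the call survives renaming the class.
            super().__init__()
            self.hs = hs

    ExampleRestServlet(hs=object())  # prints "base initialised"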
diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 6ae9a5a8e9..5b996e2d63 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -60,7 +60,7 @@ class IdTokenServlet(RestServlet): EXPIRES_MS = 3600 * 1000 def __init__(self, hs): - super(IdTokenServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py index 968403cca4..68b27ff23a 100644 --- a/synapse/rest/client/v2_alpha/password_policy.py +++ b/synapse/rest/client/v2_alpha/password_policy.py @@ -30,7 +30,7 @@ class PasswordPolicyServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(PasswordPolicyServlet, self).__init__() + super().__init__() self.policy = hs.config.password_policy self.enabled = hs.config.password_policy_enabled diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index 67cbc37312..55c6688f52 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -26,7 +26,7 @@ class ReadMarkerRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$") def __init__(self, hs): - super(ReadMarkerRestServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() self.read_marker_handler = hs.get_read_marker_handler() diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 92555bd4a9..6f7246a394 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -31,7 +31,7 @@ class ReceiptRestServlet(RestServlet): ) def __init__(self, hs): - super(ReceiptRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.receipts_handler = hs.get_receipts_handler() diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 0705718d00..ffa2dfce42 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -76,7 +76,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(EmailRegisterRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler self.config = hs.config @@ -174,7 +174,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(MsisdnRegisterRequestTokenRestServlet, self).__init__() + super().__init__() self.hs = hs self.identity_handler = hs.get_handlers().identity_handler @@ -249,7 +249,7 @@ class RegistrationSubmitTokenServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RegistrationSubmitTokenServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.config = hs.config @@ -319,7 +319,7 @@ class UsernameAvailabilityRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(UsernameAvailabilityRestServlet, self).__init__() + super().__init__() self.hs = hs self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( @@ -363,7 +363,7 @@ class RegisterRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - 
super(RegisterRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index e29f49f7f5..18c75738f8 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -61,7 +61,7 @@ class RelationSendServlet(RestServlet): ) def __init__(self, hs): - super(RelationSendServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.event_creation_handler = hs.get_event_creation_handler() self.txns = HttpTransactionCache(hs) @@ -138,7 +138,7 @@ class RelationPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() @@ -233,7 +233,7 @@ class RelationAggregationPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationAggregationPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.event_handler = hs.get_event_handler() @@ -311,7 +311,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): ) def __init__(self, hs): - super(RelationAggregationGroupPaginationServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index e15927c4ea..215d619ca1 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -32,7 +32,7 @@ class ReportEventRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/report/(?P<event_id>[^/]*)$") def __init__(self, hs): - super(ReportEventRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.clock = hs.get_clock() diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 59529707df..53de97923f 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -37,7 +37,7 @@ class RoomKeysServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -248,7 +248,7 @@ class RoomKeysNewVersionServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysNewVersionServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() @@ -301,7 +301,7 @@ class RoomKeysVersionServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(RoomKeysVersionServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 39a5518614..bf030e0ff4 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -53,7 +53,7 @@ class RoomUpgradeRestServlet(RestServlet): ) def __init__(self, hs): - super(RoomUpgradeRestServlet, self).__init__() + super().__init__() self._hs = hs self._room_creation_handler = hs.get_room_creation_handler() self._auth = hs.get_auth() diff --git 
a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index db829f3098..bc4f43639a 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -36,7 +36,7 @@ class SendToDeviceRestServlet(servlet.RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(SendToDeviceRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.txns = HttpTransactionCache(hs) diff --git a/synapse/rest/client/v2_alpha/shared_rooms.py b/synapse/rest/client/v2_alpha/shared_rooms.py index 2492634dac..c866d5151c 100644 --- a/synapse/rest/client/v2_alpha/shared_rooms.py +++ b/synapse/rest/client/v2_alpha/shared_rooms.py @@ -34,7 +34,7 @@ class UserSharedRoomsServlet(RestServlet): ) def __init__(self, hs): - super(UserSharedRoomsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.user_directory_active = hs.config.update_user_directory diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index a0b00135e1..51e395cc64 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -74,7 +74,7 @@ class SyncRestServlet(RestServlet): ALLOWED_PRESENCE = {"online", "offline", "unavailable"} def __init__(self, hs): - super(SyncRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.sync_handler = hs.get_sync_handler() diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index a3f12e8a77..bf3a79db44 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -31,7 +31,7 @@ class TagListServlet(RestServlet): PATTERNS = client_patterns("/user/(?P<user_id>[^/]*)/rooms/(?P<room_id>[^/]*)/tags") def __init__(self, hs): - super(TagListServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() @@ -56,7 +56,7 @@ class TagServlet(RestServlet): ) def __init__(self, hs): - super(TagServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 23709960ad..0c127a1b5f 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -28,7 +28,7 @@ class ThirdPartyProtocolsServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/protocols") def __init__(self, hs): - super(ThirdPartyProtocolsServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -44,7 +44,7 @@ class ThirdPartyProtocolServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/protocol/(?P<protocol>[^/]+)$") def __init__(self, hs): - super(ThirdPartyProtocolServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -65,7 +65,7 @@ class ThirdPartyUserServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/user(/(?P<protocol>[^/]+))?$") def __init__(self, hs): - super(ThirdPartyUserServlet, self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() @@ -87,7 +87,7 @@ class ThirdPartyLocationServlet(RestServlet): PATTERNS = client_patterns("/thirdparty/location(/(?P<protocol>[^/]+))?$") def __init__(self, hs): - super(ThirdPartyLocationServlet, 
self).__init__() + super().__init__() self.auth = hs.get_auth() self.appservice_handler = hs.get_application_service_handler() diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 83f3b6b70a..79317c74ba 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -28,7 +28,7 @@ class TokenRefreshRestServlet(RestServlet): PATTERNS = client_patterns("/tokenrefresh") def __init__(self, hs): - super(TokenRefreshRestServlet, self).__init__() + super().__init__() async def on_POST(self, request): raise AuthError(403, "tokenrefresh is no longer supported.") diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index bef91a2d3e..ad598cefe0 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -31,7 +31,7 @@ class UserDirectorySearchRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(UserDirectorySearchRestServlet, self).__init__() + super().__init__() self.hs = hs self.auth = hs.get_auth() self.user_directory_handler = hs.get_user_directory_handler() diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 24ac57f35d..d5018afbda 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -28,7 +28,7 @@ class VersionsRestServlet(RestServlet): PATTERNS = [re.compile("^/_matrix/client/versions$")] def __init__(self, hs): - super(VersionsRestServlet, self).__init__() + super().__init__() self.config = hs.config def on_GET(self, request): diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 2ae2fbd5d7..ccb3384db9 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -172,7 +172,7 @@ class DataStore( else: self._cache_id_gen = None - super(DataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._presence_on_startup = self._get_active_presence(db_conn) diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 5f1a2b9aa6..c5a36990e4 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -42,7 +42,7 @@ class AccountDataWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): "AccountDataAndTagsChangeCache", account_max ) - super(AccountDataWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) @abc.abstractmethod def get_max_account_data_stream_id(self): @@ -313,7 +313,7 @@ class AccountDataStore(AccountDataWorkerStore): ], ) - super(AccountDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_account_data_stream_id(self) -> int: """Get the current max stream id for the private user data stream diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 454c0bc50c..85f6b1e3fd 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -52,7 +52,7 @@ class ApplicationServiceWorkerStore(SQLBaseStore): ) self.exclusive_user_regex = _make_exclusive_regex(self.services_cache) - super(ApplicationServiceWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_app_services(self): return self.services_cache diff --git 
a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index c2fc847fbc..239c7a949c 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -31,7 +31,7 @@ LAST_SEEN_GRANULARITY = 120 * 1000 class ClientIpBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(ClientIpBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "user_ips_device_index", @@ -358,7 +358,7 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): name="client_ip_last_seen", keylen=4, max_entries=50000 ) - super(ClientIpStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.user_ips_max_age = hs.config.user_ips_max_age diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 0044433110..e71217a41f 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -283,7 +283,7 @@ class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceInboxBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "device_inbox_stream_index", @@ -313,7 +313,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore) DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceInboxStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Map of (user_id, device_id) to the last stream_id that has been # deleted up to. This is so that we can no op deletions. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 306fc6947c..c04374e43d 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -701,7 +701,7 @@ class DeviceWorkerStore(SQLBaseStore): class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( "device_lists_stream_idx", @@ -826,7 +826,7 @@ class DeviceBackgroundUpdateStore(SQLBaseStore): class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(DeviceStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. 
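The storage diffs lean harder on this change than the servlets did, because classes such as DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore) above use cooperative multiple inheritance: each __init__ must delegate to the next class in the method resolution order rather than to a hard-coded parent, so every initialiser in the diamond runs exactly once. A small sketch of that behaviour, with hypothetical store names:

    class SQLBase:
        def __init__(self, *args):
            print("SQLBase")

    class WorkerStore(SQLBase):
        def __init__(self, *args):
            super().__init__(*args)  # next class in the MRO, not a fixed parent
            print("WorkerStore")

    class BackgroundUpdateStore(SQLBase):
        def __init__(self, *args):
            super().__init__(*args)
            print("BackgroundUpdateStore")

    class Store(WorkerStore, BackgroundUpdateStore):
        def __init__(self, *args):
            super().__init__(*args)
            print("Store")

    # MRO: Store -> WorkerStore -> BackgroundUpdateStore -> SQLBase -> object
    Store("database", "db_conn", "hs")
    # Prints SQLBase, BackgroundUpdateStore, WorkerStore, Store: each runs once.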
diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 4c3c162acf..6d3689c09e 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -600,7 +600,7 @@ class EventFederationStore(EventFederationWorkerStore): EVENT_AUTH_STATE_ONLY = "event_auth_state_only" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventFederationStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.EVENT_AUTH_STATE_ONLY, self._background_delete_non_state_event_auth diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 7805fb814e..62f1738732 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -68,7 +68,7 @@ def _deserialize_action(actions, is_highlight): class EventPushActionsWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(EventPushActionsWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # These get correctly set by _find_stream_orderings_for_times_txn self.stream_ordering_month_ago = None @@ -661,7 +661,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore): EPA_HIGHLIGHT_INDEX = "epa_highlight_index" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventPushActionsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( self.EPA_HIGHLIGHT_INDEX, diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index e53c6373a8..5e4af2eb51 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -29,7 +29,7 @@ class EventsBackgroundUpdatesStore(SQLBaseStore): DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, database: DatabasePool, db_conn, hs): - super(EventsBackgroundUpdatesStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index cd3739c16c..de9e8d1dc6 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -75,7 +75,7 @@ class EventRedactBehaviour(Names): class EventsWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(EventsWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if isinstance(database.engine, PostgresEngine): # If we're using Postgres then we can use `MultiWriterIdGenerator` diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 1d76c761a6..cc538c5c10 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -24,9 +24,7 @@ BG_UPDATE_REMOVE_MEDIA_REPO_INDEX_WITHOUT_METHOD = ( class MediaRepositoryBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - 
super(MediaRepositoryBackgroundUpdateStore, self).__init__( - database, db_conn, hs - ) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_index_update( update_name="local_media_repository_url_idx", @@ -94,7 +92,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): """Persistence for attachments and avatars""" def __init__(self, database: DatabasePool, db_conn, hs): - super(MediaRepositoryStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: """Get the metadata for a local piece of media diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 1d793d3deb..e0cedd1aac 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -28,7 +28,7 @@ LAST_SEEN_GRANULARITY = 60 * 60 * 1000 class MonthlyActiveUsersWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(MonthlyActiveUsersWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._clock = hs.get_clock() self.hs = hs @@ -120,7 +120,7 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(MonthlyActiveUsersStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._limit_usage_by_mau = hs.config.limit_usage_by_mau self._mau_stats_only = hs.config.mau_stats_only diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index b7a8d34ce1..e20a16f907 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -77,7 +77,7 @@ class PushRulesWorkerStore( """ def __init__(self, database: DatabasePool, db_conn, hs): - super(PushRulesWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if hs.config.worker.worker_app is None: self._push_rules_stream_id_gen = StreamIdGenerator( diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 6568bddd81..f880b5e562 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -39,7 +39,7 @@ class ReceiptsWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(ReceiptsWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._receipts_stream_cache = StreamChangeCache( "ReceiptsRoomChangeCache", self.get_max_receipt_stream_id() @@ -386,7 +386,7 @@ class ReceiptsStore(ReceiptsWorkerStore): db_conn, "receipts_linearized", "stream_id" ) - super(ReceiptsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) def get_max_receipt_stream_id(self): return self._receipts_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 01f20c03c2..675e81fe34 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -36,7 +36,7 @@ logger = logging.getLogger(__name__) class RegistrationWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationWorkerStore, 
self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config self.clock = hs.get_clock() @@ -764,7 +764,7 @@ class RegistrationWorkerStore(SQLBaseStore): class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.clock = hs.get_clock() self.config = hs.config @@ -892,7 +892,7 @@ class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): class RegistrationStore(RegistrationBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RegistrationStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._account_validity = hs.config.account_validity self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 127588ce4c..bd6f9553c6 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -69,7 +69,7 @@ class RoomSortOrder(Enum): class RoomWorkerStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config @@ -863,7 +863,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore): ADD_ROOMS_ROOM_VERSION_COLUMN = "add_rooms_room_version_column" def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config @@ -1074,7 +1074,7 @@ class RoomBackgroundUpdateStore(SQLBaseStore): class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.config = hs.config diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 91a8b43da3..4fa8767b01 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -55,7 +55,7 @@ _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership" class RoomMemberWorkerStore(EventsWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Is the current_state_events.membership up to date? Or is the # background update still running? 
@@ -819,7 +819,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile ) @@ -973,7 +973,7 @@ class RoomMemberBackgroundUpdateStore(SQLBaseStore): class RoomMemberStore(RoomMemberWorkerStore, RoomMemberBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(RoomMemberStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def forget(self, user_id: str, room_id: str) -> None: """Indicate that user_id wishes to discard history for room_id.""" diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index f01cf2fd02..e34fce6281 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -89,7 +89,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" def __init__(self, database: DatabasePool, db_conn, hs): - super(SearchBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) if not hs.config.enable_search: return @@ -342,7 +342,7 @@ class SearchBackgroundUpdateStore(SearchWorkerStore): class SearchStore(SearchBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(SearchStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def search_msgs(self, room_ids, search_term, keys): """Performs a full text search over events with given keys. 
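One caveat worth noting before the remaining storage and test hunks: zero-argument super() only works in functions defined lexically inside a class body, because that is what creates the __class__ cell it relies on; a function defined at module level and attached to a class afterwards (as monkey-patching test helpers sometimes are) must keep the explicit two-argument form. A sketch of the failure mode, with hypothetical names:

    class Base:
        def greet(self):
            return "base"

    class Child(Base):
        def greet(self):
            return super().greet() + "/child"  # fine: defined in the class body

    def patched_greet(self):
        return super().greet() + "/patched"  # no __class__ cell here

    Child.greet = patched_greet
    try:
        Child().greet()
    except RuntimeError as exc:
        print(exc)  # super(): __class__ cell not found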
diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 5c6168e301..3c1e33819b 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -56,7 +56,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateGroupWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def get_room_version(self, room_id: str) -> RoomVersion: """Get the room_version of a given room @@ -320,7 +320,7 @@ class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" def __init__(self, database: DatabasePool, db_conn, hs): - super(MainStateBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -506,4 +506,4 @@ class StateStore(StateGroupWorkerStore, MainStateBackgroundUpdateStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 30840dbbaa..d7816a8606 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -61,7 +61,7 @@ TYPE_TO_ORIGIN_TABLE = {"room": ("rooms", "room_id"), "user": ("users", "name")} class StatsStore(StateDeltasStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(StatsStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname self.clock = self.hs.get_clock() diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 7dbe11513b..5dac78e574 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -266,7 +266,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): """ def __init__(self, database: DatabasePool, db_conn, hs: "HomeServer"): - super(StreamWorkerStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._instance_name = hs.get_instance_name() self._send_federation = hs.should_send_federation() diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 091367006e..99cffff50c 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -48,7 +48,7 @@ class TransactionStore(SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(TransactionStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f2f9a5799a..5a390ff2f6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -38,7 +38,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore): SHARE_PRIVATE_WORKING_SET = 500 def __init__(self, database: DatabasePool, db_conn, hs): - super(UserDirectoryBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.server_name = hs.hostname @@ -564,7 +564,7 @@ class 
UserDirectoryStore(UserDirectoryBackgroundUpdateStore): SHARE_PRIVATE_WORKING_SET = 500 def __init__(self, database: DatabasePool, db_conn, hs): - super(UserDirectoryStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) async def remove_from_user_dir(self, user_id: str) -> None: def _remove_from_user_dir_txn(txn): diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 139085b672..acb24e33af 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -181,7 +181,7 @@ class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" def __init__(self, database: DatabasePool, db_conn, hs): - super(StateBackgroundUpdateStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) self.db_pool.updates.register_background_update_handler( self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME, self._background_deduplicate_state, diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index e924f1ca3b..bec3780a32 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -52,7 +52,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): """ def __init__(self, database: DatabasePool, db_conn, hs): - super(StateGroupDataStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) # Originally the state store used a single DictionaryCache to cache the # event IDs for the state types in a given state group to avoid hammering diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 631654f297..da24ba0470 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -94,7 +94,7 @@ class SynapseManhole(ColoredManhole): """Overrides connectionMade to create our own ManholeInterpreter""" def connectionMade(self): - super(SynapseManhole, self).connectionMade() + super().connectionMade() # replace the manhole interpreter with our own impl self.interpreter = SynapseManholeInterpreter(self, self.namespace) diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 79869aaa44..a5cc9d0551 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -45,7 +45,7 @@ class NotRetryingDestination(Exception): """ msg = "Not retrying server %s." 
% (destination,) - super(NotRetryingDestination, self).__init__(msg) + super().__init__(msg) self.retry_last_ts = retry_last_ts self.retry_interval = retry_interval diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 210ddcbb88..366dcfb670 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -30,7 +30,7 @@ from tests import unittest, utils class E2eKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): - super(E2eKeysHandlerTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eKeysHandler diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 3362050ce0..7adde9b9de 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -47,7 +47,7 @@ room_keys = { class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): - super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 561258a356..bc578411d6 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -58,7 +58,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): # Patch up the equality operator for events so that we can check # whether lists of events match using assertEquals self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)] - return super(SlavedEventStoreTestCase, self).setUp() + return super().setUp() def prepare(self, *args, **kwargs): super().prepare(*args, **kwargs) diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index b090bb974c..dcd65c2a50 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -21,7 +21,7 @@ from tests import unittest class WellKnownTests(unittest.HomeserverTestCase): def setUp(self): - super(WellKnownTests, self).setUp() + super().setUp() # replace the JsonResource with a WellKnownResource self.resource = WellKnownResource(self.hs) diff --git a/tests/server.py b/tests/server.py index 61ec670155..b404ad4e2a 100644 --- a/tests/server.py +++ b/tests/server.py @@ -260,7 +260,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): return succeed(lookups[name]) self.nameResolver = SimpleResolverComplexifier(FakeResolver()) - super(ThreadedMemoryReactorClock, self).__init__() + super().__init__() def listenUDP(self, port, protocol, interface="", maxPacketSize=8196): p = udp.Port(port, protocol, interface, maxPacketSize, self) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index cb808d4de4..46f94914ff 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -413,7 +413,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): # required for ApplicationServiceTransactionStoreTestCase tests class TestTransactionStore(ApplicationServiceTransactionStore, ApplicationServiceStore): def __init__(self, database: DatabasePool, db_conn, hs): - super(TestTransactionStore, self).__init__(database, db_conn, hs) + super().__init__(database, db_conn, hs) class 
ApplicationServiceStoreConfigTestCase(unittest.TestCase): diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index 34ae8c9da7..ecb00f4e02 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -23,7 +23,7 @@ import tests.utils class DeviceStoreTestCase(tests.unittest.TestCase): def __init__(self, *args, **kwargs): - super(DeviceStoreTestCase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.store = None # type: synapse.storage.DataStore @defer.inlineCallbacks diff --git a/tests/test_state.py b/tests/test_state.py index 2d58467932..80b0ccbc40 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -125,7 +125,7 @@ class StateGroupStore: class DictObj(dict): def __init__(self, **kwargs): - super(DictObj, self).__init__(kwargs) + super().__init__(kwargs) self.__dict__ = self diff --git a/tests/unittest.py b/tests/unittest.py index 128dd4e19c..dabf69cff4 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -92,7 +92,7 @@ class TestCase(unittest.TestCase): root logger's logging level while that test (case|method) runs.""" def __init__(self, methodName, *args, **kwargs): - super(TestCase, self).__init__(methodName, *args, **kwargs) + super().__init__(methodName, *args, **kwargs) method = getattr(self, methodName) From 36efbcaf511790d6f1dd7df2260900f07489bda6 Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Fri, 18 Sep 2020 14:59:13 +0100 Subject: [PATCH 014/134] Catch-up after Federation Outage (bonus): Catch-up on Synapse Startup (#8322) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Patrick Cloke * Fix _set_destination_retry_timings This came about because the code assumed that retry_interval could not be NULL — which has been challenged by catch-up. --- changelog.d/8230.bugfix | 1 + changelog.d/8230.misc | 1 - changelog.d/8247.bugfix | 1 + changelog.d/8247.misc | 1 - changelog.d/8258.bugfix | 1 + changelog.d/8258.misc | 1 - changelog.d/8322.bugfix | 1 + synapse/federation/sender/__init__.py | 51 ++++++++++ .../storage/databases/main/transactions.py | 66 ++++++++++++- tests/federation/test_federation_catch_up.py | 99 +++++++++++++++++++ 10 files changed, 218 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8230.bugfix delete mode 100644 changelog.d/8230.misc create mode 100644 changelog.d/8247.bugfix delete mode 100644 changelog.d/8247.misc create mode 100644 changelog.d/8258.bugfix delete mode 100644 changelog.d/8258.misc create mode 100644 changelog.d/8322.bugfix diff --git a/changelog.d/8230.bugfix b/changelog.d/8230.bugfix new file mode 100644 index 0000000000..532d0e22fe --- /dev/null +++ b/changelog.d/8230.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8230.misc b/changelog.d/8230.misc deleted file mode 100644 index bf0ba76730..0000000000 --- a/changelog.d/8230.misc +++ /dev/null @@ -1 +0,0 @@ -Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8247.bugfix b/changelog.d/8247.bugfix new file mode 100644 index 0000000000..532d0e22fe --- /dev/null +++ b/changelog.d/8247.bugfix @@ -0,0 +1 @@ +Fix messages over federation being lost until an event is sent into the same room. 
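The _set_destination_retry_timings fix called out in the commit message is, at heart, a Python 3 comparison problem: catch-up can now create destinations rows whose retry_interval is NULL, and the old code compared that column's value directly, but ordering comparisons against None raise TypeError in Python 3 (in Python 2 they silently succeeded). A small sketch of the failure and of the guarded form the diff below adopts (variable names hypothetical):

    prev_retry_interval = None  # row created by catch-up, never yet backed off
    new_retry_interval = 5000

    try:
        prev_retry_interval < new_retry_interval
    except TypeError as exc:
        print(exc)  # '<' not supported between instances of 'NoneType' and 'int'

    # Guarded comparison, mirroring the added "retry_interval IS NULL" branch:
    should_update = (
        new_retry_interval == 0
        or prev_retry_interval is None
        or prev_retry_interval < new_retry_interval
    )
    print(should_update)  # True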
diff --git a/changelog.d/8247.misc b/changelog.d/8247.misc
deleted file mode 100644
index 3c27803be4..0000000000
--- a/changelog.d/8247.misc
+++ /dev/null
@@ -1 +0,0 @@
-Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage.
diff --git a/changelog.d/8258.bugfix b/changelog.d/8258.bugfix
new file mode 100644
index 0000000000..532d0e22fe
--- /dev/null
+++ b/changelog.d/8258.bugfix
@@ -0,0 +1 @@
+Fix messages over federation being lost until an event is sent into the same room.
diff --git a/changelog.d/8258.misc b/changelog.d/8258.misc
deleted file mode 100644
index 3c27803be4..0000000000
--- a/changelog.d/8258.misc
+++ /dev/null
@@ -1 +0,0 @@
-Track the `stream_ordering` of the last successfully-sent event to every destination, so we can use this information to 'catch up' a remote server after an outage.
diff --git a/changelog.d/8322.bugfix b/changelog.d/8322.bugfix
new file mode 100644
index 0000000000..532d0e22fe
--- /dev/null
+++ b/changelog.d/8322.bugfix
@@ -0,0 +1 @@
+Fix messages over federation being lost until an event is sent into the same room.
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 41a726878d..8bb17b3a05 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -55,6 +55,15 @@ sent_pdus_destination_dist_total = Counter(
     "Total number of PDUs queued for sending across all destinations",
 )
 
+# Time (in s) after Synapse's startup that we will begin to wake up destinations
+# that have catch-up outstanding.
+CATCH_UP_STARTUP_DELAY_SEC = 15
+
+# Time (in s) to wait in between waking up each destination, i.e. one destination
+# will be woken up every CATCH_UP_STARTUP_INTERVAL_SEC seconds after Synapse's
+# startup until we have woken every destination that has outstanding catch-up.
+CATCH_UP_STARTUP_INTERVAL_SEC = 5
+
 
 class FederationSender:
     def __init__(self, hs: "synapse.server.HomeServer"):
@@ -125,6 +134,14 @@ class FederationSender:
             1000.0 / hs.config.federation_rr_transactions_per_room_per_second
         )
 
+        # wake up destinations that have outstanding PDUs to be caught up
+        self._catchup_after_startup_timer = self.clock.call_later(
+            CATCH_UP_STARTUP_DELAY_SEC,
+            run_as_background_process,
+            "wake_destinations_needing_catchup",
+            self._wake_destinations_needing_catchup,
+        )
+
     def _get_per_destination_queue(self, destination: str) -> PerDestinationQueue:
         """Get or create a PerDestinationQueue for the given destination
 
@@ -560,3 +577,37 @@ class FederationSender:
         # Dummy implementation for case where federation sender isn't offloaded
         # to a worker.
         return [], 0, False
+
+    async def _wake_destinations_needing_catchup(self):
+        """
+        Wakes up destinations that need catch-up and are not currently being
+        backed off from.
+
+        In order to reduce load spikes, adds a delay between each destination.
+        """
+
+        last_processed = None  # type: Optional[str]
+
+        while True:
+            destinations_to_wake = await self.store.get_catch_up_outstanding_destinations(
+                last_processed
+            )
+
+            if not destinations_to_wake:
+                # finished waking all destinations!
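+                # clearing the stored timer below also marks the startup
+                # catch-up pass as finished; nothing re-arms it afterwards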
+ self._catchup_after_startup_timer = None + break + + destinations_to_wake = [ + d + for d in destinations_to_wake + if self._federation_shard_config.should_handle(self._instance_name, d) + ] + + for last_processed in destinations_to_wake: + logger.info( + "Destination %s has outstanding catch-up, waking up.", + last_processed, + ) + self.wake_destination(last_processed) + await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC) diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 99cffff50c..97aed1500e 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -218,6 +218,7 @@ class TransactionStore(SQLBaseStore): retry_interval = EXCLUDED.retry_interval WHERE EXCLUDED.retry_interval = 0 + OR destinations.retry_interval IS NULL OR destinations.retry_interval < EXCLUDED.retry_interval """ @@ -249,7 +250,11 @@ class TransactionStore(SQLBaseStore): "retry_interval": retry_interval, }, ) - elif retry_interval == 0 or prev_row["retry_interval"] < retry_interval: + elif ( + retry_interval == 0 + or prev_row["retry_interval"] is None + or prev_row["retry_interval"] < retry_interval + ): self.db_pool.simple_update_one_txn( txn, "destinations", @@ -397,7 +402,7 @@ class TransactionStore(SQLBaseStore): @staticmethod def _get_catch_up_room_event_ids_txn( - txn, destination: str, last_successful_stream_ordering: int, + txn: LoggingTransaction, destination: str, last_successful_stream_ordering: int, ) -> List[str]: q = """ SELECT event_id FROM destination_rooms @@ -412,3 +417,60 @@ class TransactionStore(SQLBaseStore): ) event_ids = [row[0] for row in txn] return event_ids + + async def get_catch_up_outstanding_destinations( + self, after_destination: Optional[str] + ) -> List[str]: + """ + Gets at most 25 destinations which have outstanding PDUs to be caught up, + and are not being backed off from + Args: + after_destination: + If provided, all destinations must be lexicographically greater + than this one. + + Returns: + list of up to 25 destinations with outstanding catch-up. + These are the lexicographically first destinations which are + lexicographically greater than after_destination (if provided). + """ + time = self.hs.get_clock().time_msec() + + return await self.db_pool.runInteraction( + "get_catch_up_outstanding_destinations", + self._get_catch_up_outstanding_destinations_txn, + time, + after_destination, + ) + + @staticmethod + def _get_catch_up_outstanding_destinations_txn( + txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str] + ) -> List[str]: + q = """ + SELECT destination FROM destinations + WHERE destination IN ( + SELECT destination FROM destination_rooms + WHERE destination_rooms.stream_ordering > + destinations.last_successful_stream_ordering + ) + AND destination > ? + AND ( + retry_last_ts IS NULL OR + retry_last_ts + retry_interval < ? + ) + ORDER BY destination + LIMIT 25 + """ + txn.execute( + q, + ( + # everything is lexicographically greater than "" so this gives + # us the first batch of up to 25. 
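+                # each subsequent call passes in the last destination
+                # processed, so this is keyset pagination over the sorted
+                # destination list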
+ after_destination or "", + now_time_ms, + ), + ) + + destinations = [row[0] for row in txn] + return destinations diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index cc52c3dfac..1a3ccb263d 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -321,3 +321,102 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): per_dest_queue._last_successful_stream_ordering, event_5.internal_metadata.stream_ordering, ) + + @override_config({"send_federation": True}) + def test_catch_up_on_synapse_startup(self): + """ + Tests the behaviour of get_catch_up_outstanding_destinations and + _wake_destinations_needing_catchup. + """ + + # list of sorted server names (note that there are more servers than the batch + # size used in get_catch_up_outstanding_destinations). + server_names = ["server%02d" % number for number in range(42)] + ["zzzerver"] + + # ARRANGE: + # - a local user (u1) + # - a room which u1 is joined to (and remote users @user:serverXX are + # joined to) + + # mark the remotes as online + self.is_online = True + + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room_id = self.helper.create_room_as("u1", tok=u1_token) + + for server_name in server_names: + self.get_success( + event_injection.inject_member_event( + self.hs, room_id, "@user:%s" % server_name, "join" + ) + ) + + # create an event + self.helper.send(room_id, "deary me!", tok=u1_token) + + # ASSERT: + # - All servers are up to date so none should have outstanding catch-up + outstanding_when_successful = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations(None) + ) + self.assertEqual(outstanding_when_successful, []) + + # ACT: + # - Make the remote servers unreachable + self.is_online = False + + # - Mark zzzerver as being backed-off from + now = self.clock.time_msec() + self.get_success( + self.hs.get_datastore().set_destination_retry_timings( + "zzzerver", now, now, 24 * 60 * 60 * 1000 # retry in 1 day + ) + ) + + # - Send an event + self.helper.send(room_id, "can anyone hear me?", tok=u1_token) + + # ASSERT (get_catch_up_outstanding_destinations): + # - all remotes are outstanding + # - they are returned in batches of 25, in order + outstanding_1 = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations(None) + ) + + self.assertEqual(len(outstanding_1), 25) + self.assertEqual(outstanding_1, server_names[0:25]) + + outstanding_2 = self.get_success( + self.hs.get_datastore().get_catch_up_outstanding_destinations( + outstanding_1[-1] + ) + ) + self.assertNotIn("zzzerver", outstanding_2) + self.assertEqual(len(outstanding_2), 17) + self.assertEqual(outstanding_2, server_names[25:-1]) + + # ACT: call _wake_destinations_needing_catchup + + # patch wake_destination to just count the destinations instead + woken = [] + + def wake_destination_track(destination): + woken.append(destination) + + self.hs.get_federation_sender().wake_destination = wake_destination_track + + # cancel the pre-existing timer for _wake_destinations_needing_catchup + # this is because we are calling it manually rather than waiting for it + # to be called automatically + self.hs.get_federation_sender()._catchup_after_startup_timer.cancel() + + self.get_success( + self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0 + ) + + # ASSERT (_wake_destinations_needing_catchup): + # - all remotes are woken up, save for zzzerver + 
self.assertNotIn("zzzerver", woken) + # - all destinations are woken exactly once; they appear once in woken. + self.assertCountEqual(woken, server_names[:-1]) From 5b70acb44c730109029a5624238cdda80e109b17 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 18 Sep 2020 15:00:07 +0100 Subject: [PATCH 015/134] 1.19.3 --- CHANGES.md | 9 +++++++++ changelog.d/8350.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/8350.bugfix diff --git a/CHANGES.md b/CHANGES.md index d82b30c66c..f20566e71e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.19.3 (2020-09-18) +=========================== + +Bugfixes +-------- + +- Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. ([\#8350](https://github.com/matrix-org/synapse/issues/8350)) + + Synapse 1.19.2 (2020-09-16) =========================== diff --git a/changelog.d/8350.bugfix b/changelog.d/8350.bugfix deleted file mode 100644 index 0e493c0282..0000000000 --- a/changelog.d/8350.bugfix +++ /dev/null @@ -1 +0,0 @@ -Partially mitigate bug where newly joined servers couldn't get past events in a room when there is a malformed event. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 6d60db6084..82125220aa 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.19.3) stable; urgency=medium + + * New synapse release 1.19.3. + + -- Synapse Packaging team Fri, 18 Sep 2020 14:59:30 +0100 + matrix-synapse-py3 (1.19.2) stable; urgency=medium * New synapse release 1.19.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 078914695a..56705707e0 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.19.2" +__version__ = "1.19.3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From d688b4bafca58dfff1be35615d6ff1e202d47cc6 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 18 Sep 2020 16:26:36 +0200 Subject: [PATCH 016/134] Admin API for querying rooms where a user is a member (#8306) Add a new admin API `GET /_synapse/admin/v1/users//joined_rooms` to list all rooms where a user is a member. --- changelog.d/8306.feature | 1 + docs/admin_api/user_admin_api.rst | 37 ++++++++++++ synapse/rest/admin/__init__.py | 2 + synapse/rest/admin/users.py | 26 +++++++++ tests/rest/admin/test_user.py | 96 ++++++++++++++++++++++++++++++- 5 files changed, 160 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8306.feature diff --git a/changelog.d/8306.feature b/changelog.d/8306.feature new file mode 100644 index 0000000000..5c23da4030 --- /dev/null +++ b/changelog.d/8306.feature @@ -0,0 +1 @@ +Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index e21c78a9c6..7ca902faba 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -304,6 +304,43 @@ To use it, you will need to authenticate by providing an ``access_token`` for a server admin: see `README.rst `_. +List room memberships of an user +================================ +Gets a list of all ``room_id`` that a specific ``user_id`` is member. 
+
+The API is::
+
+  GET /_synapse/admin/v1/users/<user_id>/joined_rooms
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst `_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "joined_rooms": [
+            "!DuGcnbhHGaSZQoNQR:matrix.org",
+            "!ZtSaPCawyWtxfWiIy:matrix.org"
+        ],
+        "total": 2
+    }
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - fully qualified: for example, ``@user:server.com``.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``joined_rooms`` - An array of ``room_id``.
+- ``total`` - Number of rooms.
+
+
 User devices
 ============
 
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index abf362c7b7..4a75c06480 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -49,6 +49,7 @@ from synapse.rest.admin.users import (
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     UserAdminServlet,
+    UserMembershipRestServlet,
     UserRegisterServlet,
     UserRestServletV2,
     UsersRestServlet,
@@ -209,6 +210,7 @@ def register_servlets(hs, http_server):
     SendServerNoticeServlet(hs).register(http_server)
     VersionServlet(hs).register(http_server)
     UserAdminServlet(hs).register(http_server)
+    UserMembershipRestServlet(hs).register(http_server)
     UserRestServletV2(hs).register(http_server)
     UsersRestServletV2(hs).register(http_server)
     DeviceRestServlet(hs).register(http_server)
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 0f537031c4..20dc1d0e05 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -683,3 +683,29 @@ class UserAdminServlet(RestServlet):
         await self.store.set_server_admin(target_user, set_admin_to)
 
         return 200, {}
+
+
+class UserMembershipRestServlet(RestServlet):
+    """
+    Get room list of a user.
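+
+    The response is a JSON object of the form
+    {"joined_rooms": [<room_id>, ...], "total": <number of rooms>}.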
+    """
+
+    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/joined_rooms$")
+
+    def __init__(self, hs):
+        self.is_mine = hs.is_mine
+        self.auth = hs.get_auth()
+        self.store = hs.get_datastore()
+
+    async def on_GET(self, request, user_id):
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.is_mine(UserID.from_string(user_id)):
+            raise SynapseError(400, "Can only lookup local users")
+
+        room_ids = await self.store.get_rooms_for_user(user_id)
+        if not room_ids:
+            raise NotFoundError("User not found")
+
+        ret = {"joined_rooms": list(room_ids), "total": len(room_ids)}
+        return 200, ret
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index b8b7758d24..f96011fc1c 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -22,8 +22,8 @@ from mock import Mock
 
 import synapse.rest.admin
 from synapse.api.constants import UserTypes
-from synapse.api.errors import HttpResponseException, ResourceLimitError
-from synapse.rest.client.v1 import login
+from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError
+from synapse.rest.client.v1 import login, room
 from synapse.rest.client.v2_alpha import sync
 
 from tests import unittest
@@ -995,3 +995,95 @@ class UserRestTestCase(unittest.HomeserverTestCase):
 
         # Ensure they're still alive
         self.assertEqual(0, channel.json_body["deactivated"])
+
+
+class UserMembershipRestTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+        sync.register_servlets,
+        room.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.url = "/_synapse/admin/v1/users/%s/joined_rooms" % urllib.parse.quote(
+            self.other_user
+        )
+
+    def test_no_auth(self):
+        """
+        Try to list rooms of a user without authentication.
+        """
+        request, channel = self.make_request("GET", self.url, b"{}")
+        self.render(request)
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, an error is returned.
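+        (Expected: a 403 response with errcode M_FORBIDDEN.)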
+        """
+        other_user_token = self.login("user", "pass")
+
+        request, channel = self.make_request(
+            "GET", self.url, access_token=other_user_token,
+        )
+        self.render(request)
+
+        self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
+
+    def test_user_does_not_exist(self):
+        """
+        Tests that a lookup for a user that does not exist returns a 404
+        """
+        url = "/_synapse/admin/v1/users/@unknown_person:test/joined_rooms"
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(404, channel.code, msg=channel.json_body)
+        self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
+
+    def test_user_is_not_local(self):
+        """
+        Tests that a lookup for a user that is not local returns a 400
+        """
+        url = "/_synapse/admin/v1/users/@unknown_person:unknown_domain/joined_rooms"
+
+        request, channel = self.make_request(
+            "GET", url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, channel.code, msg=channel.json_body)
+        self.assertEqual("Can only lookup local users", channel.json_body["error"])
+
+    def test_get_rooms(self):
+        """
+        Tests that a normal lookup for rooms is successful
+        """
+        # Create rooms and join
+        other_user_tok = self.login("user", "pass")
+        number_rooms = 5
+        for n in range(number_rooms):
+            self.helper.create_room_as(self.other_user, tok=other_user_tok)
+
+        # Get rooms
+        request, channel = self.make_request(
+            "GET", self.url, access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, channel.code, msg=channel.json_body)
+        self.assertEqual(number_rooms, channel.json_body["total"])
+        self.assertEqual(number_rooms, len(channel.json_body["joined_rooms"]))

From d5f7182ba15647f1c900883b7edbe898e32f012b Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 18 Sep 2020 10:56:50 -0400
Subject: [PATCH 017/134] 1.20.0rc5

---
 CHANGES.md               | 22 ++++++++++++++++++++++
 changelog.d/8285.misc    |  1 -
 changelog.d/8342.bugfix  |  1 -
 changelog.d/8343.feature |  1 -
 changelog.d/8349.bugfix  |  1 -
 synapse/__init__.py      |  2 +-
 6 files changed, 23 insertions(+), 5 deletions(-)
 delete mode 100644 changelog.d/8285.misc
 delete mode 100644 changelog.d/8342.bugfix
 delete mode 100644 changelog.d/8343.feature
 delete mode 100644 changelog.d/8349.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index aade896bd5..de869001b3 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,25 @@
+Synapse 1.20.0rc5 (2020-09-18)
+==============================
+
+Features
+--------
+
+- Add flags to the `/versions` endpoint that includes whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343))
+
+
+Bugfixes
+--------
+
+- Fix ratelimitng of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342))
+- Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349))
+
+
+Internal Changes
+----------------
+
+- Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented.
([\#8285](https://github.com/matrix-org/synapse/issues/8285)) + + Synapse 1.19.3 (2020-09-18) =========================== diff --git a/changelog.d/8285.misc b/changelog.d/8285.misc deleted file mode 100644 index 4646664ba1..0000000000 --- a/changelog.d/8285.misc +++ /dev/null @@ -1 +0,0 @@ -Blacklist [MSC2753](https://github.com/matrix-org/matrix-doc/pull/2753) SyTests until it is implemented. \ No newline at end of file diff --git a/changelog.d/8342.bugfix b/changelog.d/8342.bugfix deleted file mode 100644 index 786057facb..0000000000 --- a/changelog.d/8342.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix ratelimitng of federation `/send` requests. diff --git a/changelog.d/8343.feature b/changelog.d/8343.feature deleted file mode 100644 index ccecb22f37..0000000000 --- a/changelog.d/8343.feature +++ /dev/null @@ -1 +0,0 @@ -Add flags to the `/versions` endpoint that includes whether new rooms default to using E2EE. diff --git a/changelog.d/8349.bugfix b/changelog.d/8349.bugfix deleted file mode 100644 index cf2f531b14..0000000000 --- a/changelog.d/8349.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. diff --git a/synapse/__init__.py b/synapse/__init__.py index 6b11c5681b..a95753dcc7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.20.0rc4" +__version__ = "1.20.0rc5" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From c4e8b18c72365c9d8f592e74519f8fe5a6cf402b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 10:57:29 -0400 Subject: [PATCH 018/134] Tweak wording in the changelog. --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index de869001b3..7909386b47 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,13 +4,13 @@ Synapse 1.20.0rc5 (2020-09-18) Features -------- -- Add flags to the `/versions` endpoint that includes whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343)) +- Add flags to the `/versions` endpoint for whether new rooms default to using E2EE. ([\#8343](https://github.com/matrix-org/synapse/issues/8343)) Bugfixes -------- -- Fix ratelimitng of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342)) +- Fix rate limiting of federation `/send` requests. ([\#8342](https://github.com/matrix-org/synapse/issues/8342)) - Fix a longstanding bug where back pagination over federation could get stuck if it failed to handle a received event. ([\#8349](https://github.com/matrix-org/synapse/issues/8349)) From c7e060bfee89ebea599b22fc402fae8336b1afed Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 11:10:59 -0400 Subject: [PATCH 019/134] Add a note about including the changes from 1.19.3. --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 7909386b47..84976ab2bd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.20.0rc5 (2020-09-18) ============================== +In addition to the below, Synapse 1.20.0rc5 also includes the bug fix that was included in 1.19.3. + Features -------- From babc0275431c68e64050db11959d74a636afbd3e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 18 Sep 2020 12:54:04 -0400 Subject: [PATCH 020/134] Fix a bad merge from release-v1.20.0. 
(#8354) --- changelog.d/8354.misc | 1 + synapse/handlers/pagination.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8354.misc diff --git a/changelog.d/8354.misc b/changelog.d/8354.misc new file mode 100644 index 0000000000..1d33cde2da --- /dev/null +++ b/changelog.d/8354.misc @@ -0,0 +1 @@ +Fix bad merge from `release-v1.20.0` branch to `develop`. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index f132ed3368..a0b3bdb5e0 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -385,7 +385,7 @@ class PaginationHandler: ) await self.hs.get_handlers().federation_handler.maybe_backfill( - room_id, curr_topo, limit=source_config.limit, + room_id, curr_topo, limit=pagin_config.limit, ) to_room_key = None From 4f3096d866a9810b1c982669d9567fe47b2db73f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 21 Sep 2020 12:34:06 +0100 Subject: [PATCH 021/134] Add a comment re #1691 --- synapse/crypto/context_factory.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 2b03f5ac76..79668a402e 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -45,7 +45,11 @@ _TLS_VERSION_MAP = { class ServerContextFactory(ContextFactory): """Factory for PyOpenSSL SSL contexts that are used to handle incoming - connections.""" + connections. + + TODO: replace this with an implementation of IOpenSSLServerConnectionCreator, + per https://github.com/matrix-org/synapse/issues/1691 + """ def __init__(self, config): # TODO: once pyOpenSSL exposes TLS_METHOD and SSL_CTX_set_min_proto_version, From 37ca5924bddccc37521798236339b539677d101f Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Tue, 22 Sep 2020 13:42:55 +0300 Subject: [PATCH 022/134] Create function to check for long names in devices (#8364) * Create a new function to verify that the length of a device name is under a certain threshold. * Refactor old code and tests to use said function. * Verify device name length during registration of device * Add a test for the above Signed-off-by: Dionysis Grigoropoulos --- changelog.d/8364.bugfix | 2 ++ synapse/handlers/device.py | 30 ++++++++++++++++++++++++------ tests/handlers/test_device.py | 11 +++++++++++ tests/rest/admin/test_device.py | 2 +- 4 files changed, 38 insertions(+), 7 deletions(-) create mode 100644 changelog.d/8364.bugfix diff --git a/changelog.d/8364.bugfix b/changelog.d/8364.bugfix new file mode 100644 index 0000000000..7b82cbc388 --- /dev/null +++ b/changelog.d/8364.bugfix @@ -0,0 +1,2 @@ +Fix a bug where during device registration the length of the device name wasn't +limited. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 55a9787439..4149520d6c 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -20,6 +20,7 @@ from typing import Any, Dict, List, Optional from synapse.api import errors from synapse.api.constants import EventTypes from synapse.api.errors import ( + Codes, FederationDeniedError, HttpResponseException, RequestSendFailed, @@ -265,6 +266,24 @@ class DeviceHandler(DeviceWorkerHandler): hs.get_distributor().observe("user_left_room", self.user_left_room) + def _check_device_name_length(self, name: str): + """ + Checks whether a device name is longer than the maximum allowed length. + + Args: + name: The name of the device. + + Raises: + SynapseError: if the device name is too long. 
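+
+        (Called both when registering a new device and when renaming an
+        existing one, so the limit now applies at registration time too.)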
+ """ + if name and len(name) > MAX_DEVICE_DISPLAY_NAME_LEN: + raise SynapseError( + 400, + "Device display name is too long (max %i)" + % (MAX_DEVICE_DISPLAY_NAME_LEN,), + errcode=Codes.TOO_LARGE, + ) + async def check_device_registered( self, user_id, device_id, initial_device_display_name=None ): @@ -282,6 +301,9 @@ class DeviceHandler(DeviceWorkerHandler): Returns: str: device id (generated if none was supplied) """ + + self._check_device_name_length(initial_device_display_name) + if device_id is not None: new_device = await self.store.store_device( user_id=user_id, @@ -397,12 +419,8 @@ class DeviceHandler(DeviceWorkerHandler): # Reject a new displayname which is too long. new_display_name = content.get("display_name") - if new_display_name and len(new_display_name) > MAX_DEVICE_DISPLAY_NAME_LEN: - raise SynapseError( - 400, - "Device display name is too long (max %i)" - % (MAX_DEVICE_DISPLAY_NAME_LEN,), - ) + + self._check_device_name_length(new_display_name) try: await self.store.update_device( diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 6aa322bf3a..969d44c787 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -35,6 +35,17 @@ class DeviceTestCase(unittest.HomeserverTestCase): # These tests assume that it starts 1000 seconds in. self.reactor.advance(1000) + def test_device_is_created_with_invalid_name(self): + self.get_failure( + self.handler.check_device_registered( + user_id="@boris:foo", + device_id="foo", + initial_device_display_name="a" + * (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1), + ), + synapse.api.errors.SynapseError, + ) + def test_device_is_created_if_doesnt_exist(self): res = self.get_success( self.handler.check_device_registered( diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index faa7f381a9..92c9058887 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -221,7 +221,7 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(400, channel.code, msg=channel.json_body) - self.assertEqual(Codes.UNKNOWN, channel.json_body["errcode"]) + self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"]) # Ensure the display name was not updated. request, channel = self.make_request( From 55bb5fda339f8ec232e8b2a65df01f1597e594ee Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Sep 2020 15:18:31 +0100 Subject: [PATCH 023/134] 1.20.0 --- CHANGES.md | 6 ++++++ debian/changelog | 8 ++++++-- synapse/__init__.py | 2 +- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 84976ab2bd..5a846daa4d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.20.0 (2020-09-22) +=========================== + +No significant changes. + + Synapse 1.20.0rc5 (2020-09-18) ============================== diff --git a/debian/changelog b/debian/changelog index dbf01d6b1e..ae548f9f33 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,12 @@ -matrix-synapse-py3 (1.20.0ubuntu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.20.0) stable; urgency=medium + [ Synapse Packaging team ] + * New synapse release 1.20.0. 
+ + [ Dexter Chua ] * Use Type=notify in systemd service - -- Dexter Chua Wed, 26 Aug 2020 12:41:36 +0000 + -- Synapse Packaging team Tue, 22 Sep 2020 15:19:32 +0100 matrix-synapse-py3 (1.19.3) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index a95753dcc7..8242d05f60 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.20.0rc5" +__version__ = "1.20.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 012736ff070573fa15611a178e83421f4998abb7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Sep 2020 15:30:44 +0100 Subject: [PATCH 024/134] Deprecation warning for synapse admin api being accessible under /_matrix --- CHANGES.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 5a846daa4d..bac099e9b5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,19 @@ Synapse 1.20.0 (2020-09-22) =========================== -No significant changes. +No significant changes since v1.20.0rc5. + +Removal warning +--------------- + +Historically, the [Synapse Admin +API](https://github.com/matrix-org/synapse/tree/master/docs) has been +accessible under the `/_matrix/client/api/v1/admin`, +`/_matrix/client/unstable/admin`, `/_matrix/client/r0/admin` and +`/_synapse/admin` prefixes. In a future release, we will soon be dropping +support for accessing Synapse's Admin API using the `/_matrix/client/*` +prefixes. This is to help make locking down external access to the Admin API +endpoints easier for homeserver admins. Synapse 1.20.0rc5 (2020-09-18) From d191dbdaa6b0a00c1148f0464f542c15db973efa Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Sep 2020 15:42:53 +0100 Subject: [PATCH 025/134] Fix wording of deprecation notice in changelog --- CHANGES.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index bac099e9b5..84711de448 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -10,11 +10,10 @@ Historically, the [Synapse Admin API](https://github.com/matrix-org/synapse/tree/master/docs) has been accessible under the `/_matrix/client/api/v1/admin`, `/_matrix/client/unstable/admin`, `/_matrix/client/r0/admin` and -`/_synapse/admin` prefixes. In a future release, we will soon be dropping -support for accessing Synapse's Admin API using the `/_matrix/client/*` -prefixes. This is to help make locking down external access to the Admin API -endpoints easier for homeserver admins. - +`/_synapse/admin` prefixes. In a future release, we will be dropping support +for accessing Synapse's Admin API using the `/_matrix/client/*` prefixes. This +makes it easier for homeserver admins to lock down external access to the Admin +API endpoints. Synapse 1.20.0rc5 (2020-09-18) ============================== From 4da01f9c614f36a293235d6a1fd3602d550f2001 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 22 Sep 2020 19:15:04 +0200 Subject: [PATCH 026/134] Admin API for reported events (#8217) Add an admin API to read entries of table `event_reports`. 
API: `GET /_synapse/admin/v1/event_reports`
---
 changelog.d/8217.feature               |   1 +
 docs/admin_api/event_reports.rst       | 129 +++++++++
 synapse/rest/admin/__init__.py         |   2 +
 synapse/rest/admin/event_reports.py    |  88 ++++++
 synapse/storage/databases/main/room.py |  95 ++++++
 tests/rest/admin/test_event_reports.py | 382 +++++++++++++++++++++++++
 6 files changed, 697 insertions(+)
 create mode 100644 changelog.d/8217.feature
 create mode 100644 docs/admin_api/event_reports.rst
 create mode 100644 synapse/rest/admin/event_reports.py
 create mode 100644 tests/rest/admin/test_event_reports.py

diff --git a/changelog.d/8217.feature b/changelog.d/8217.feature
new file mode 100644
index 0000000000..899cbf14ef
--- /dev/null
+++ b/changelog.d/8217.feature
@@ -0,0 +1 @@
+Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel.
\ No newline at end of file
diff --git a/docs/admin_api/event_reports.rst b/docs/admin_api/event_reports.rst
new file mode 100644
index 0000000000..461be01230
--- /dev/null
+++ b/docs/admin_api/event_reports.rst
@@ -0,0 +1,129 @@
+Show reported events
+====================
+
+This API returns information about reported events.
+
+The API is::
+
+    GET /_synapse/admin/v1/event_reports?from=0&limit=10
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst `_.
+
+It returns a JSON body like the following:
+
+.. code:: jsonc
+
+    {
+        "event_reports": [
+            {
+                "content": {
+                    "reason": "foo",
+                    "score": -100
+                },
+                "event_id": "$bNUFCwGzWca1meCGkjp-zwslF-GfVcXukvRLI1_FaVY",
+                "event_json": {
+                    "auth_events": [
+                        "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M",
+                        "$oggsNXxzPFRE3y53SUNd7nsj69-QzKv03a1RucHu-ws"
+                    ],
+                    "content": {
+                        "body": "matrix.org: This Week in Matrix",
+                        "format": "org.matrix.custom.html",
+                        "formatted_body": "matrix.org:
This Week in Matrix", + "msgtype": "m.notice" + }, + "depth": 546, + "hashes": { + "sha256": "xK1//xnmvHJIOvbgXlkI8eEqdvoMmihVDJ9J4SNlsAw" + }, + "origin": "matrix.org", + "origin_server_ts": 1592291711430, + "prev_events": [ + "$YK4arsKKcc0LRoe700pS8DSjOvUT4NDv0HfInlMFw2M" + ], + "prev_state": [], + "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org", + "sender": "@foobar:matrix.org", + "signatures": { + "matrix.org": { + "ed25519:a_JaEG": "cs+OUKW/iHx5pEidbWxh0UiNNHwe46Ai9LwNz+Ah16aWDNszVIe2gaAcVZfvNsBhakQTew51tlKmL2kspXk/Dg" + } + }, + "type": "m.room.message", + "unsigned": { + "age_ts": 1592291711430, + } + }, + "id": 2, + "reason": "foo", + "received_ts": 1570897107409, + "room_alias": "#alias1:matrix.org", + "room_id": "!ERAgBpSOcCCuTJqQPk:matrix.org", + "sender": "@foobar:matrix.org", + "user_id": "@foo:matrix.org" + }, + { + "content": { + "reason": "bar", + "score": -100 + }, + "event_id": "$3IcdZsDaN_En-S1DF4EMCy3v4gNRKeOJs8W5qTOKj4I", + "event_json": { + // hidden items + // see above + }, + "id": 3, + "reason": "bar", + "received_ts": 1598889612059, + "room_alias": "#alias2:matrix.org", + "room_id": "!eGvUQuTCkHGVwNMOjv:matrix.org", + "sender": "@foobar:matrix.org", + "user_id": "@bar:matrix.org" + } + ], + "next_token": 2, + "total": 4 + } + +To paginate, check for ``next_token`` and if present, call the endpoint again +with ``from`` set to the value of ``next_token``. This will return a new page. + +If the endpoint does not return a ``next_token`` then there are no more +reports to paginate through. + +**URL parameters:** + +- ``limit``: integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to ``100``. +- ``from``: integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of ``next_token`` from a previous call. + Defaults to ``0``. +- ``dir``: string - Direction of event report order. Whether to fetch the most recent first (``b``) or the + oldest first (``f``). Defaults to ``b``. +- ``user_id``: string - Is optional and filters to only return users with user IDs that contain this value. + This is the user who reported the event and wrote the reason. +- ``room_id``: string - Is optional and filters to only return rooms with room IDs that contain this value. + +**Response** + +The following fields are returned in the JSON response body: + +- ``id``: integer - ID of event report. +- ``received_ts``: integer - The timestamp (in milliseconds since the unix epoch) when this report was sent. +- ``room_id``: string - The ID of the room in which the event being reported is located. +- ``event_id``: string - The ID of the reported event. +- ``user_id``: string - This is the user who reported the event and wrote the reason. +- ``reason``: string - Comment made by the ``user_id`` in this report. May be blank. +- ``content``: object - Content of reported event. + + - ``reason``: string - Comment made by the ``user_id`` in this report. May be blank. + - ``score``: integer - Content is reported based upon a negative score, where -100 is "most offensive" and 0 is "inoffensive". + +- ``sender``: string - This is the ID of the user who sent the original message/event that was reported. +- ``room_alias``: string - The alias of the room. ``null`` if the room does not have a canonical alias set. +- ``event_json``: object - Details of the original event that was reported. 
+- ``next_token``: integer - Indication for pagination. See above. +- ``total``: integer - Total number of event reports related to the query (``user_id`` and ``room_id``). + diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 4a75c06480..5c5f00b213 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -31,6 +31,7 @@ from synapse.rest.admin.devices import ( DeviceRestServlet, DevicesRestServlet, ) +from synapse.rest.admin.event_reports import EventReportsRestServlet from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet @@ -216,6 +217,7 @@ def register_servlets(hs, http_server): DeviceRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeleteDevicesRestServlet(hs).register(http_server) + EventReportsRestServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py new file mode 100644 index 0000000000..5b8d0594cd --- /dev/null +++ b/synapse/rest/admin/event_reports.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.api.errors import Codes, SynapseError +from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin + +logger = logging.getLogger(__name__) + + +class EventReportsRestServlet(RestServlet): + """ + List all reported events that are known to the homeserver. Results are returned + in a dictionary containing report information. Supports pagination. + The requester must have administrator access in Synapse. + + GET /_synapse/admin/v1/event_reports + returns: + 200 OK with list of reports if success otherwise an error. + + Args: + The parameters `from` and `limit` are required only for pagination. + By default, a `limit` of 100 is used. + The parameter `dir` can be used to define the order of results. + The parameter `user_id` can be used to filter by user id. + The parameter `room_id` can be used to filter by room id. 
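+        `user_id` and `room_id` are substring filters (matched with SQL
+        LIKE), and the two can be combined.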
+ Returns: + A list of reported events and an integer representing the total number of + reported events that exist given this query + """ + + PATTERNS = admin_patterns("/event_reports$") + + def __init__(self, hs): + self.hs = hs + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + async def on_GET(self, request): + await assert_requester_is_admin(self.auth, request) + + start = parse_integer(request, "from", default=0) + limit = parse_integer(request, "limit", default=100) + direction = parse_string(request, "dir", default="b") + user_id = parse_string(request, "user_id") + room_id = parse_string(request, "room_id") + + if start < 0: + raise SynapseError( + 400, + "The start parameter must be a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + if limit < 0: + raise SynapseError( + 400, + "The limit parameter must be a positive integer.", + errcode=Codes.INVALID_PARAM, + ) + + if direction not in ("f", "b"): + raise SynapseError( + 400, "Unknown direction: %s" % (direction,), errcode=Codes.INVALID_PARAM + ) + + event_reports, total = await self.store.get_event_reports_paginate( + start, limit, direction, user_id, room_id + ) + ret = {"event_reports": event_reports, "total": total} + if (start + limit) < total: + ret["next_token"] = start + len(event_reports) + + return 200, ret diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index bd6f9553c6..3ee097abf7 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1328,6 +1328,101 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): desc="add_event_report", ) + async def get_event_reports_paginate( + self, + start: int, + limit: int, + direction: str = "b", + user_id: Optional[str] = None, + room_id: Optional[str] = None, + ) -> Tuple[List[Dict[str, Any]], int]: + """Retrieve a paginated list of event reports + + Args: + start: event offset to begin the query from + limit: number of rows to retrieve + direction: Whether to fetch the most recent first (`"b"`) or the + oldest first (`"f"`) + user_id: search for user_id. Ignored if user_id is None + room_id: search for room_id. Ignored if room_id is None + Returns: + event_reports: json list of event reports + count: total number of event reports matching the filter criteria + """ + + def _get_event_reports_paginate_txn(txn): + filters = [] + args = [] + + if user_id: + filters.append("er.user_id LIKE ?") + args.extend(["%" + user_id + "%"]) + if room_id: + filters.append("er.room_id LIKE ?") + args.extend(["%" + room_id + "%"]) + + if direction == "b": + order = "DESC" + else: + order = "ASC" + + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + + sql = """ + SELECT COUNT(*) as total_event_reports + FROM event_reports AS er + {} + """.format( + where_clause + ) + txn.execute(sql, args) + count = txn.fetchone()[0] + + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.reason, + er.content, + events.sender, + room_aliases.room_alias, + event_json.json AS event_json + FROM event_reports AS er + LEFT JOIN room_aliases + ON room_aliases.room_id = er.room_id + JOIN events + ON events.event_id = er.event_id + JOIN event_json + ON event_json.event_id = er.event_id + {where_clause} + ORDER BY er.received_ts {order} + LIMIT ? + OFFSET ? 
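+                -- offset-based pagination: `limit` rows starting at `from`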
+ """.format( + where_clause=where_clause, order=order, + ) + + args += [limit, start] + txn.execute(sql, args) + event_reports = self.db_pool.cursor_to_dict(txn) + + if count > 0: + for row in event_reports: + try: + row["content"] = db_to_json(row["content"]) + row["event_json"] = db_to_json(row["event_json"]) + except Exception: + continue + + return event_reports, count + + return await self.db_pool.runInteraction( + "get_event_reports_paginate", _get_event_reports_paginate_txn + ) + def get_current_public_room_stream_id(self): return self._public_room_id_gen.get_current_token() diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py new file mode 100644 index 0000000000..bf79086f78 --- /dev/null +++ b/tests/rest/admin/test_event_reports.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Dirk Klimpel +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import synapse.rest.admin +from synapse.api.errors import Codes +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import report_event + +from tests import unittest + + +class EventReportsTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + report_event.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id1 = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id1, user=self.admin_user, tok=self.admin_user_tok) + + self.room_id2 = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id2, user=self.admin_user, tok=self.admin_user_tok) + + # Two rooms and two users. Every user sends and reports every room event + for i in range(5): + self._create_event_and_report( + room_id=self.room_id1, user_tok=self.other_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id2, user_tok=self.other_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id1, user_tok=self.admin_user_tok, + ) + for i in range(5): + self._create_event_and_report( + room_id=self.room_id2, user_tok=self.admin_user_tok, + ) + + self.url = "/_synapse/admin/v1/event_reports" + + def test_requester_is_no_admin(self): + """ + If the user is not a server admin, an error 403 is returned. 
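+        (The admin check is performed by assert_requester_is_admin before
+        any query parameters are parsed.)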
+ """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.other_user_tok, + ) + self.render(request) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_default_success(self): + """ + Testing list of reported events + """ + + request, channel = self.make_request( + "GET", self.url, access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 20) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + def test_limit(self): + """ + Testing list of reported events with limit + """ + + request, channel = self.make_request( + "GET", self.url + "?limit=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 5) + self.assertEqual(channel.json_body["next_token"], 5) + self._check_fields(channel.json_body["event_reports"]) + + def test_from(self): + """ + Testing list of reported events with a defined starting point (from) + """ + + request, channel = self.make_request( + "GET", self.url + "?from=5", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(len(channel.json_body["event_reports"]), 15) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + def test_limit_and_from(self): + """ + Testing list of reported events with a defined starting point and limit + """ + + request, channel = self.make_request( + "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 20) + self.assertEqual(channel.json_body["next_token"], 15) + self.assertEqual(len(channel.json_body["event_reports"]), 10) + self._check_fields(channel.json_body["event_reports"]) + + def test_filter_room(self): + """ + Testing list of reported events with a filter of room + """ + + request, channel = self.make_request( + "GET", + self.url + "?room_id=%s" % self.room_id1, + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 10) + self.assertEqual(len(channel.json_body["event_reports"]), 10) + self.assertNotIn("next_token", channel.json_body) + self._check_fields(channel.json_body["event_reports"]) + + for report in channel.json_body["event_reports"]: + self.assertEqual(report["room_id"], self.room_id1) + + def test_filter_user(self): + """ + Testing list of reported events with a filter of user + """ + + request, channel = self.make_request( + "GET", + self.url + "?user_id=%s" % self.other_user, + access_token=self.admin_user_tok, + ) + self.render(request) + + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(channel.json_body["total"], 10) + 
self.assertEqual(len(channel.json_body["event_reports"]), 10)
+        self.assertNotIn("next_token", channel.json_body)
+        self._check_fields(channel.json_body["event_reports"])
+
+        for report in channel.json_body["event_reports"]:
+            self.assertEqual(report["user_id"], self.other_user)
+
+    def test_filter_user_and_room(self):
+        """
+        Testing list of reported events with a filter of user and room
+        """
+
+        request, channel = self.make_request(
+            "GET",
+            self.url + "?user_id=%s&room_id=%s" % (self.other_user, self.room_id1),
+            access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 5)
+        self.assertEqual(len(channel.json_body["event_reports"]), 5)
+        self.assertNotIn("next_token", channel.json_body)
+        self._check_fields(channel.json_body["event_reports"])
+
+        for report in channel.json_body["event_reports"]:
+            self.assertEqual(report["user_id"], self.other_user)
+            self.assertEqual(report["room_id"], self.room_id1)
+
+    def test_valid_search_order(self):
+        """
+        Testing search order. Order by timestamps.
+        """
+
+        # fetch the most recent first, largest timestamp
+        request, channel = self.make_request(
+            "GET", self.url + "?dir=b", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 20)
+        report = 1
+        while report < len(channel.json_body["event_reports"]):
+            self.assertGreaterEqual(
+                channel.json_body["event_reports"][report - 1]["received_ts"],
+                channel.json_body["event_reports"][report]["received_ts"],
+            )
+            report += 1
+
+        # fetch the oldest first, smallest timestamp
+        request, channel = self.make_request(
+            "GET", self.url + "?dir=f", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 20)
+        report = 1
+        while report < len(channel.json_body["event_reports"]):
+            self.assertLessEqual(
+                channel.json_body["event_reports"][report - 1]["received_ts"],
+                channel.json_body["event_reports"][report]["received_ts"],
+            )
+            report += 1
+
+    def test_invalid_search_order(self):
+        """
+        Testing that an invalid search order returns a 400
+        """
+
+        request, channel = self.make_request(
+            "GET", self.url + "?dir=bar", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+        self.assertEqual("Unknown direction: bar", channel.json_body["error"])
+
+    def test_limit_is_negative(self):
+        """
+        Testing that a negative limit parameter returns a 400
+        """
+
+        request, channel = self.make_request(
+            "GET", self.url + "?limit=-5", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+    def test_from_is_negative(self):
+        """
+        Testing that a negative from parameter returns a 400
+        """
+
+        request, channel = self.make_request(
+            "GET", self.url + "?from=-5", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(400, int(channel.result["code"]),
msg=channel.result["body"])
+        self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
+
+    def test_next_token(self):
+        """
+        Testing that `next_token` appears at the right place
+        """
+
+        # `next_token` does not appear
+        # Number of results is the number of entries
+        request, channel = self.make_request(
+            "GET", self.url + "?limit=20", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 20)
+        self.assertNotIn("next_token", channel.json_body)
+
+        # `next_token` does not appear
+        # Number of max results is larger than the number of entries
+        request, channel = self.make_request(
+            "GET", self.url + "?limit=21", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 20)
+        self.assertNotIn("next_token", channel.json_body)
+
+        # `next_token` does appear
+        # Number of max results is smaller than the number of entries
+        request, channel = self.make_request(
+            "GET", self.url + "?limit=19", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 19)
+        self.assertEqual(channel.json_body["next_token"], 19)
+
+        # Check
+        # Set `from` to the value of `next_token` to request the remaining entries
+        # `next_token` does not appear
+        request, channel = self.make_request(
+            "GET", self.url + "?from=19", access_token=self.admin_user_tok,
+        )
+        self.render(request)
+
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(channel.json_body["total"], 20)
+        self.assertEqual(len(channel.json_body["event_reports"]), 1)
+        self.assertNotIn("next_token", channel.json_body)
+
+    def _create_event_and_report(self, room_id, user_tok):
+        """Create and report events
+        """
+        resp = self.helper.send(room_id, tok=user_tok)
+        event_id = resp["event_id"]
+
+        request, channel = self.make_request(
+            "POST",
+            "rooms/%s/report/%s" % (room_id, event_id),
+            json.dumps({"score": -100, "reason": "this makes me sad"}),
+            access_token=user_tok,
+        )
+        self.render(request)
+        self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+
+    def _check_fields(self, content):
+        """Checks that all attributes are present in an event report
+        """
+        for c in content:
+            self.assertIn("id", c)
+            self.assertIn("received_ts", c)
+            self.assertIn("room_id", c)
+            self.assertIn("event_id", c)
+            self.assertIn("user_id", c)
+            self.assertIn("reason", c)
+            self.assertIn("content", c)
+            self.assertIn("sender", c)
+            self.assertIn("room_alias", c)
+            self.assertIn("event_json", c)
+            self.assertIn("score", c["content"])
+            self.assertIn("reason", c["content"])
+            self.assertIn("auth_events", c["event_json"])
+            self.assertIn("type", c["event_json"])
+            self.assertIn("room_id", c["event_json"])
+            self.assertIn("sender", c["event_json"])
+            self.assertIn("content", c["event_json"])

From 8998217540bc41975e64e44c507632361ca95698 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 22 Sep 2020 19:19:01 +0200
Subject: [PATCH 027/134] Fixed a bug with reactivating users
 with the admin API (#8362)

Fixes: #8359

Trying to reactivate a user with the admin API (`PUT /_synapse/admin/v2/users/`)
causes an internal server error. Seems to be a regression in #8033.
---
 changelog.d/8362.bugfix                          |  1 +
 .../storage/databases/main/user_erasure_store.py |  2 +-
 tests/rest/admin/test_user.py                    | 14 ++++++++++++++
 3 files changed, 16 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/8362.bugfix

diff --git a/changelog.d/8362.bugfix b/changelog.d/8362.bugfix
new file mode 100644
index 0000000000..4e50067c87
--- /dev/null
+++ b/changelog.d/8362.bugfix
@@ -0,0 +1 @@
+Fixed a regression in v1.19.0 with reactivating users through the admin API.

diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py
index 2f7c95fc74..f9575b1f1f 100644
--- a/synapse/storage/databases/main/user_erasure_store.py
+++ b/synapse/storage/databases/main/user_erasure_store.py
@@ -100,7 +100,7 @@ class UserErasureStore(UserErasureWorkerStore):
             return
 
         # They are there, delete them.
-        self.simple_delete_one_txn(
+        self.db_pool.simple_delete_one_txn(
             txn, "erased_users", keyvalues={"user_id": user_id}
         )
 
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index f96011fc1c..98d0623734 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -874,6 +874,10 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         )
         self.render(request)
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+        self._is_erased("@user:test", False)
+        d = self.store.mark_user_erased("@user:test")
+        self.assertIsNone(self.get_success(d))
+        self._is_erased("@user:test", True)
 
         # Attempt to reactivate the user (without a password).
         request, channel = self.make_request(
@@ -906,6 +910,7 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual("@user:test", channel.json_body["name"])
         self.assertEqual(False, channel.json_body["deactivated"])
+        self._is_erased("@user:test", False)
 
     def test_set_user_as_admin(self):
         """
@@ -996,6 +1001,15 @@ class UserRestTestCase(unittest.HomeserverTestCase):
         # Ensure they're still alive
         self.assertEqual(0, channel.json_body["deactivated"])
 
+    def _is_erased(self, user_id, expect):
+        """Assert that the user is erased or not
+        """
+        d = self.store.is_user_erased(user_id)
+        if expect:
+            self.assertTrue(self.get_success(d))
+        else:
+            self.assertFalse(self.get_success(d))
+
 
 class UserMembershipRestTestCase(unittest.HomeserverTestCase):

From 4325be1a52b9054a2c1096dcdb29ee79d9ad4ead Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 22 Sep 2020 19:39:29 +0100
Subject: [PATCH 028/134] Fix missing null character check on guest_access room
 state

When updating room_stats_state, we try to check for null bytes slipping
into the content for state events. It turns out we had added guest_access
as a field to room_stats_state without including it in the null byte
check. Lo and behold, a null byte in an m.room.guest_access event then
breaks room_stats_state updates.

This PR adds the check for guest_access. A further PR will improve this
function so that this hopefully does not happen again in future.
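To make the failure mode concrete, here is a minimal, self-contained sketch of the guard this patch extends. The `sanitize_fields` helper, the abridged column tuple, and the `fields[col] = None` fallback are illustrative assumptions rather than Synapse's API; the sentinel-based test mirrors the store code in the diff below. PostgreSQL rejects NUL bytes in text columns, so a NUL-tainted value has to be dropped before the row is written:

```python
# Illustrative sketch only; helper name and the None fallback are assumptions.
sentinel = object()


def sanitize_fields(fields: dict) -> None:
    """Drop any state field that is not a string or contains a NUL byte."""
    # Column list abridged for the example; the real tuple now includes
    # "guest_access", which is the point of this patch.
    for col in ("topic", "avatar", "canonical_alias", "guest_access"):
        field = fields.get(col, sentinel)
        if field is not sentinel and (not isinstance(field, str) or "\0" in field):
            fields[col] = None  # assumed fallback: null out rather than fail


fields = {"topic": "hello", "guest_access": "can_join\0"}
sanitize_fields(fields)
assert fields["guest_access"] is None  # the NUL-tainted value was dropped
assert fields["topic"] == "hello"
```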
---
 synapse/storage/databases/main/stats.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py
index d7816a8606..5beb302be3 100644
--- a/synapse/storage/databases/main/stats.py
+++ b/synapse/storage/databases/main/stats.py
@@ -210,6 +210,7 @@ class StatsStore(StateDeltasStore):
             * topic
             * avatar
             * canonical_alias
+            * guest_access
 
         An is_federatable key can also be included with a boolean value.
@@ -234,6 +235,7 @@ class StatsStore(StateDeltasStore):
             "topic",
             "avatar",
             "canonical_alias",
+            "guest_access",
         ):
             field = fields.get(col, sentinel)
             if field is not sentinel and (not isinstance(field, str) or "\0" in field):

From 48336eeb85457e356a7a23619776dc598ebd2189 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 22 Sep 2020 14:54:23 +0100
Subject: [PATCH 029/134] Changelog

---
 changelog.d/8373.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/8373.bugfix

diff --git a/changelog.d/8373.bugfix b/changelog.d/8373.bugfix
new file mode 100644
index 0000000000..e9d66a2088
--- /dev/null
+++ b/changelog.d/8373.bugfix
@@ -0,0 +1 @@
+Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2.
\ No newline at end of file

From a4e63e5a47a855884ae3aea41dfbfa464bddb744 Mon Sep 17 00:00:00 2001
From: Julian Fietkau <1278511+jfietkau@users.noreply.github.com>
Date: Wed, 23 Sep 2020 12:14:08 +0200
Subject: [PATCH 030/134] Add note to reverse_proxy.md about disabling
 Apache's mod_security2 (#8375)

This change adds a note and a few lines of configuration settings for
Apache users to disable ModSecurity for Synapse's virtual hosts. With
ModSecurity enabled and running with its default settings, Matrix clients
are unable to send chat messages through the Synapse installation. With
this change, ModSecurity can be disabled only for the Synapse virtual
hosts.
---
 changelog.d/8375.doc  | 1 +
 docs/reverse_proxy.md | 8 ++++++++
 2 files changed, 9 insertions(+)
 create mode 100644 changelog.d/8375.doc

diff --git a/changelog.d/8375.doc b/changelog.d/8375.doc
new file mode 100644
index 0000000000..d291fb92fa
--- /dev/null
+++ b/changelog.d/8375.doc
@@ -0,0 +1 @@
+Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau).

diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md
index edd109fa7b..46d8f35771 100644
--- a/docs/reverse_proxy.md
+++ b/docs/reverse_proxy.md
@@ -121,6 +121,14 @@ example.com:8448 {
 
 **NOTE**: ensure the `nocanon` options are included.
 
+**NOTE 2**: It appears that Synapse is currently incompatible with the ModSecurity module for Apache (`mod_security2`). If you need it enabled for other services on your web server, you can disable it for Synapse's two VirtualHosts by including the following lines before each of the two `</VirtualHost>` above:
+
+```
+<IfModule security2_module>
+    SecRuleEngine off
+</IfModule>
+```
+
 ### HAProxy
 
 ```

From bbde4038dff379fdf48b914782a73a6889135a56 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Wed, 23 Sep 2020 06:45:37 -0400
Subject: [PATCH 031/134] Do not check lint/test dependencies at runtime.
(#8377) moves non-runtime dependencies out of synapse.python_dependencies (test and lint) --- changelog.d/8330.misc | 2 +- changelog.d/8377.misc | 1 + setup.py | 16 ++++++++++++++++ synapse/python_dependencies.py | 13 ++++--------- tox.ini | 8 +++----- 5 files changed, 25 insertions(+), 15 deletions(-) create mode 100644 changelog.d/8377.misc diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc index c51370f215..fbfdd52473 100644 --- a/changelog.d/8330.misc +++ b/changelog.d/8330.misc @@ -1 +1 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. \ No newline at end of file +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8377.misc b/changelog.d/8377.misc new file mode 100644 index 0000000000..fbfdd52473 --- /dev/null +++ b/changelog.d/8377.misc @@ -0,0 +1 @@ +Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/setup.py b/setup.py index 54ddec8f9f..926b1bc86f 100755 --- a/setup.py +++ b/setup.py @@ -94,6 +94,22 @@ ALL_OPTIONAL_REQUIREMENTS = dependencies["ALL_OPTIONAL_REQUIREMENTS"] # Make `pip install matrix-synapse[all]` install all the optional dependencies. CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS) +# Developer dependencies should not get included in "all". +# +# We pin black so that our tests don't start failing on new releases. +CONDITIONAL_REQUIREMENTS["lint"] = [ + "isort==5.0.3", + "black==19.10b0", + "flake8-comprehensions", + "flake8", +] + +# Dependencies which are exclusively required by unit test code. This is +# NOT a list of all modules that are necessary to run the unit tests. +# Tests assume that all optional dependencies are installed. +# +# parameterized_class decorator was introduced in parameterized 0.7.0 +CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"] setup( name="matrix-synapse", diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 67f019fd22..288631477e 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -37,6 +37,9 @@ logger = logging.getLogger(__name__) # installed when that optional dependency requirement is specified. It is passed # to setup() as extras_require in setup.py # +# Note that these both represent runtime dependencies (and the versions +# installed are checked at runtime). +# # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers. REQUIREMENTS = [ @@ -92,20 +95,12 @@ CONDITIONAL_REQUIREMENTS = { "oidc": ["authlib>=0.14.0"], "systemd": ["systemd-python>=231"], "url_preview": ["lxml>=3.5.0"], - # Dependencies which are exclusively required by unit test code. This is - # NOT a list of all modules that are necessary to run the unit tests. - # Tests assume that all optional dependencies are installed. - # - # parameterized_class decorator was introduced in parameterized 0.7.0 - "test": ["mock>=2.0", "parameterized>=0.7.0"], "sentry": ["sentry-sdk>=0.7.2"], "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"], "jwt": ["pyjwt>=1.6.4"], # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], - # We pin black so that our tests don't start failing on new releases. 
-    "lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"],
 }
 
 ALL_OPTIONAL_REQUIREMENTS = set()  # type: Set[str]
@@ -113,7 +108,7 @@ ALL_OPTIONAL_REQUIREMENTS = set()  # type: Set[str]
 for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
     # Exclude systemd as it's a system-based requirement.
-    # Exclude lint as it's a dev-based requirement.
-    if name not in ["systemd", "lint"]:
+    if name not in ["systemd"]:
         ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
 
diff --git a/tox.ini b/tox.ini
index ddcab0198f..4d132eff4c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,13 +2,12 @@
 envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort
 
 [base]
+extras = test
 deps =
-    mock
     python-subunit
     junitxml
     coverage
     coverage-enable-subprocess
-    parameterized
 
     # cryptography 2.2 requires setuptools >= 18.5
     #
@@ -36,7 +35,7 @@ setenv =
 [testenv]
 deps =
     {[base]deps}
-extras = all
+extras = all, test
 
 whitelist_externals =
     sh
@@ -84,7 +83,6 @@ deps =
     # Old automat version for Twisted
     Automat == 0.3.0
 
-    mock
     lxml
     coverage
     coverage-enable-subprocess
@@ -97,7 +95,7 @@ commands =
     /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install'
 
     # Install Synapse itself. This won't update any libraries.
-    pip install -e .
+    pip install -e ".[test]"
 
     {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:}

From 916bb9d0d15cf941e73b2e808c553a1edd1c2eb9 Mon Sep 17 00:00:00 2001
From: Mathieu Velten
Date: Wed, 23 Sep 2020 17:06:28 +0200
Subject: [PATCH 032/134] Don't push if a user account has expired (#8353)

---
 changelog.d/8353.bugfix                        |  1 +
 synapse/api/auth.py                            |  6 +-----
 synapse/push/pusherpool.py                     | 18 ++++++++++++++++++
 synapse/storage/databases/main/registration.py | 14 ++++++++++++++
 4 files changed, 34 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/8353.bugfix

diff --git a/changelog.d/8353.bugfix b/changelog.d/8353.bugfix
new file mode 100644
index 0000000000..45fc0adb8d
--- /dev/null
+++ b/changelog.d/8353.bugfix
@@ -0,0 +1 @@
+Don't send push notifications to expired user accounts.

diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 75388643ee..1071a0576e 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -218,11 +218,7 @@ class Auth:
         # Deny the request if the user account has expired.
         if self._account_validity.enabled and not allow_expired:
             user_id = user.to_string()
-            expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
-            if (
-                expiration_ts is not None
-                and self.clock.time_msec() >= expiration_ts
-            ):
+            if await self.store.is_account_expired(user_id, self.clock.time_msec()):
                 raise AuthError(
                     403, "User account has expired", errcode=Codes.EXPIRED_ACCOUNT
                 )

diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py
index cc839ffce4..76150e117b 100644
--- a/synapse/push/pusherpool.py
+++ b/synapse/push/pusherpool.py
@@ -60,6 +60,8 @@ class PusherPool:
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
 
+        self._account_validity = hs.config.account_validity
+
         # We shard the handling of push notifications by user ID.
        self._pusher_shard_config = hs.config.push.pusher_shard_config
         self._instance_name = hs.get_instance_name()
 
@@ -202,6 +204,14 @@ class PusherPool:
             )
 
             for u in users_affected:
+                # Don't push if the user account has expired
+                if self._account_validity.enabled:
+                    expired = await self.store.is_account_expired(
+                        u, self.clock.time_msec()
+                    )
+                    if expired:
+                        continue
+
                 if u in self.pushers:
                     for p in self.pushers[u].values():
                         p.on_new_notifications(max_stream_id)
@@ -222,6 +232,14 @@ class PusherPool:
             )
 
             for u in users_affected:
+                # Don't push if the user account has expired
+                if self._account_validity.enabled:
+                    expired = await self.store.is_account_expired(
+                        u, self.clock.time_msec()
+                    )
+                    if expired:
+                        continue
+
                 if u in self.pushers:
                     for p in self.pushers[u].values():
                         p.on_new_receipts(min_stream_id, max_stream_id)

diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 675e81fe34..33825e8949 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -116,6 +116,20 @@ class RegistrationWorkerStore(SQLBaseStore):
             desc="get_expiration_ts_for_user",
         )
 
+    async def is_account_expired(self, user_id: str, current_ts: int) -> bool:
+        """
+        Returns whether a user account is expired.
+
+        Args:
+            user_id: The user's ID
+            current_ts: The current timestamp
+
+        Returns:
+            Whether the user account has expired
+        """
+        expiration_ts = await self.get_expiration_ts_for_user(user_id)
+        return expiration_ts is not None and current_ts >= expiration_ts
+
     async def set_account_validity_for_user(
         self,
         user_id: str,

From cbabb312e0b59090e5a8cf9e7e016a8618e62867 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 23 Sep 2020 16:11:18 +0100
Subject: [PATCH 033/134] Use `async with` for ID gens (#8383)

This will allow us to hit the DB after we've finished using the generated
stream ID.
---
 changelog.d/8383.misc                          |   1 +
 .../storage/databases/main/account_data.py     |   4 +-
 synapse/storage/databases/main/deviceinbox.py  |   4 +-
 synapse/storage/databases/main/devices.py      |   6 +-
 .../storage/databases/main/end_to_end_keys.py  |   2 +-
 synapse/storage/databases/main/events.py       |   6 +-
 .../storage/databases/main/group_server.py     |   2 +-
 synapse/storage/databases/main/presence.py     |   4 +-
 synapse/storage/databases/main/push_rule.py    |   8 +-
 synapse/storage/databases/main/pusher.py       |   4 +-
 synapse/storage/databases/main/receipts.py     |   2 +-
 synapse/storage/databases/main/room.py         |   6 +-
 synapse/storage/databases/main/tags.py         |   4 +-
 synapse/storage/util/id_generators.py          | 130 ++++++++++--------
 tests/storage/test_id_generators.py            |  66 +++++----
 15 files changed, 144 insertions(+), 105 deletions(-)
 create mode 100644 changelog.d/8383.misc

diff --git a/changelog.d/8383.misc b/changelog.d/8383.misc
new file mode 100644
index 0000000000..cb8318bf57
--- /dev/null
+++ b/changelog.d/8383.misc
@@ -0,0 +1 @@
+Refactor ID generators to use `async with` syntax.
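As a quick illustration of why this refactor matters: a plain `with` block cannot await anything on exit, while an async context manager's `__aexit__` can. A minimal, self-contained sketch follows (the `StreamIdCtx` class and its values are hypothetical, not the real generator):

```python
import asyncio


class StreamIdCtx:
    """Hypothetical sketch: an async context manager may await DB work in
    __aexit__, which is what lets the ID generator hit the database after
    the caller has finished using the generated stream ID."""

    def __init__(self, next_id: int):
        self.next_id = next_id

    async def __aenter__(self) -> int:
        return self.next_id

    async def __aexit__(self, exc_type, exc, tb) -> bool:
        await asyncio.sleep(0)  # stands in for an awaited DB call
        return False


async def main() -> None:
    # Callers move from `with await gen.get_next()` to `async with gen.get_next()`.
    async with StreamIdCtx(8) as stream_id:
        print("persisting event at stream ordering", stream_id)


asyncio.run(main())
```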
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py
index c5a36990e4..ef81d73573 100644
--- a/synapse/storage/databases/main/account_data.py
+++ b/synapse/storage/databases/main/account_data.py
@@ -339,7 +339,7 @@ class AccountDataStore(AccountDataWorkerStore):
         """
         content_json = json_encoder.encode(content)
 
-        with await self._account_data_id_gen.get_next() as next_id:
+        async with self._account_data_id_gen.get_next() as next_id:
             # no need to lock here as room_account_data has a unique constraint
             # on (user_id, room_id, account_data_type) so simple_upsert will
             # retry if there is a conflict.
@@ -387,7 +387,7 @@ class AccountDataStore(AccountDataWorkerStore):
         """
         content_json = json_encoder.encode(content)
 
-        with await self._account_data_id_gen.get_next() as next_id:
+        async with self._account_data_id_gen.get_next() as next_id:
             # no need to lock here as account_data has a unique constraint on
             # (user_id, account_data_type) so simple_upsert will retry if
             # there is a conflict.

diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py
index e71217a41f..d42faa3f1f 100644
--- a/synapse/storage/databases/main/deviceinbox.py
+++ b/synapse/storage/databases/main/deviceinbox.py
@@ -362,7 +362,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
             rows.append((destination, stream_id, now_ms, edu_json))
         txn.executemany(sql, rows)
 
-        with await self._device_inbox_id_gen.get_next() as stream_id:
+        async with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
             await self.db_pool.runInteraction(
                 "add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
@@ -411,7 +411,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore)
                 txn, stream_id, local_messages_by_user_then_device
             )
 
-        with await self._device_inbox_id_gen.get_next() as stream_id:
+        async with self._device_inbox_id_gen.get_next() as stream_id:
             now_ms = self.clock.time_msec()
             await self.db_pool.runInteraction(
                 "add_messages_from_remote_to_device_inbox",

diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py
index c04374e43d..fdf394c612 100644
--- a/synapse/storage/databases/main/devices.py
+++ b/synapse/storage/databases/main/devices.py
@@ -377,7 +377,7 @@ class DeviceWorkerStore(SQLBaseStore):
             The new stream ID.
""" - with await self._device_list_id_gen.get_next() as stream_id: + async with self._device_list_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "add_user_sig_change_to_streams", self._add_user_signature_change_txn, @@ -1093,7 +1093,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): if not device_ids: return - with await self._device_list_id_gen.get_next_mult( + async with self._device_list_id_gen.get_next_mult( len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( @@ -1108,7 +1108,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): return stream_ids[-1] context = get_active_span_text_map() - with await self._device_list_id_gen.get_next_mult( + async with self._device_list_id_gen.get_next_mult( len(hosts) * len(device_ids) ) as stream_ids: await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index c8df0bcb3f..22e1ed15d0 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -831,7 +831,7 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): key (dict): the key data """ - with await self._cross_signing_id_gen.get_next() as stream_id: + async with self._cross_signing_id_gen.get_next() as stream_id: return await self.db_pool.runInteraction( "add_e2e_cross_signing_key", self._set_e2e_cross_signing_key_txn, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 9a80f419e3..7723d82496 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -156,15 +156,15 @@ class PersistEventsStore: # Note: Multiple instances of this function cannot be in flight at # the same time for the same room. 
if backfilled: - stream_ordering_manager = await self._backfill_id_gen.get_next_mult( + stream_ordering_manager = self._backfill_id_gen.get_next_mult( len(events_and_contexts) ) else: - stream_ordering_manager = await self._stream_id_gen.get_next_mult( + stream_ordering_manager = self._stream_id_gen.get_next_mult( len(events_and_contexts) ) - with stream_ordering_manager as stream_orderings: + async with stream_ordering_manager as stream_orderings: for (event, context), stream in zip(events_and_contexts, stream_orderings): event.internal_metadata.stream_ordering = stream diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index ccfbb2135e..7218191965 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -1265,7 +1265,7 @@ class GroupServerStore(GroupServerWorkerStore): return next_id - with await self._group_updates_id_gen.get_next() as next_id: + async with self._group_updates_id_gen.get_next() as next_id: res = await self.db_pool.runInteraction( "register_user_group_membership", _register_user_group_membership_txn, diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index c9f655dfb7..dbbb99cb95 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -23,11 +23,11 @@ from synapse.util.iterutils import batch_iter class PresenceStore(SQLBaseStore): async def update_presence(self, presence_states): - stream_ordering_manager = await self._presence_id_gen.get_next_mult( + stream_ordering_manager = self._presence_id_gen.get_next_mult( len(presence_states) ) - with stream_ordering_manager as stream_orderings: + async with stream_ordering_manager as stream_orderings: await self.db_pool.runInteraction( "update_presence", self._update_presence_txn, diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index e20a16f907..711d5aa23d 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -338,7 +338,7 @@ class PushRuleStore(PushRulesWorkerStore): ) -> None: conditions_json = json_encoder.encode(conditions) actions_json = json_encoder.encode(actions) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() if before or after: @@ -585,7 +585,7 @@ class PushRuleStore(PushRulesWorkerStore): txn, stream_id, event_stream_ordering, user_id, rule_id, op="DELETE" ) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( @@ -616,7 +616,7 @@ class PushRuleStore(PushRulesWorkerStore): Raises: NotFoundError if the rule does not exist. 
""" - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( "_set_push_rule_enabled_txn", @@ -754,7 +754,7 @@ class PushRuleStore(PushRulesWorkerStore): data={"actions": actions_json}, ) - with await self._push_rules_stream_id_gen.get_next() as stream_id: + async with self._push_rules_stream_id_gen.get_next() as stream_id: event_stream_ordering = self._stream_id_gen.get_current_token() await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c388468273..df8609b97b 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -281,7 +281,7 @@ class PusherStore(PusherWorkerStore): last_stream_ordering, profile_tag="", ) -> None: - with await self._pushers_id_gen.get_next() as stream_id: + async with self._pushers_id_gen.get_next() as stream_id: # no need to lock because `pushers` has a unique key on # (app_id, pushkey, user_name) so simple_upsert will retry await self.db_pool.simple_upsert( @@ -344,7 +344,7 @@ class PusherStore(PusherWorkerStore): }, ) - with await self._pushers_id_gen.get_next() as stream_id: + async with self._pushers_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "delete_pusher", delete_pusher_txn, stream_id ) diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index f880b5e562..c79ddff680 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -524,7 +524,7 @@ class ReceiptsStore(ReceiptsWorkerStore): "insert_receipt_conv", graph_to_linear ) - with await self._receipts_id_gen.get_next() as stream_id: + async with self._receipts_id_gen.get_next() as stream_id: event_ts = await self.db_pool.runInteraction( "insert_linearized_receipt", self.insert_linearized_receipt_txn, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3ee097abf7..3c7630857f 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1137,7 +1137,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "store_room_txn", store_room_txn, next_id ) @@ -1204,7 +1204,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "set_room_is_public", set_room_is_public_txn, next_id ) @@ -1284,7 +1284,7 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): }, ) - with await self._public_room_id_gen.get_next() as next_id: + async with self._public_room_id_gen.get_next() as next_id: await self.db_pool.runInteraction( "set_room_is_public_appservice", set_room_is_public_appservice_txn, diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 96ffe26cc9..9f120d3cb6 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -210,7 +210,7 @@ class TagsStore(TagsWorkerStore): ) self._update_revision_txn(txn, user_id, room_id, next_id) - with await 
self._account_data_id_gen.get_next() as next_id: + async with self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction("add_tag", add_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) @@ -232,7 +232,7 @@ class TagsStore(TagsWorkerStore): txn.execute(sql, (user_id, room_id, tag)) self._update_revision_txn(txn, user_id, room_id, next_id) - with await self._account_data_id_gen.get_next() as next_id: + async with self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction("remove_tag", remove_tag_txn, next_id) self.get_tags_for_user.invalidate((user_id,)) diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 1de2b91587..b0353ac2dc 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -12,14 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import contextlib import heapq import logging import threading from collections import deque -from typing import Dict, List, Set +from contextlib import contextmanager +from typing import Dict, List, Optional, Set, Union +import attr from typing_extensions import Deque from synapse.storage.database import DatabasePool, LoggingTransaction @@ -86,7 +86,7 @@ class StreamIdGenerator: upwards, -1 to grow downwards. Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ @@ -101,10 +101,10 @@ class StreamIdGenerator: ) self._unfinished_ids = deque() # type: Deque[int] - async def get_next(self): + def get_next(self): """ Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ with self._lock: @@ -113,7 +113,7 @@ class StreamIdGenerator: self._unfinished_ids.append(next_id) - @contextlib.contextmanager + @contextmanager def manager(): try: yield next_id @@ -121,12 +121,12 @@ class StreamIdGenerator: with self._lock: self._unfinished_ids.remove(next_id) - return manager() + return _AsyncCtxManagerWrapper(manager()) - async def get_next_mult(self, n): + def get_next_mult(self, n): """ Usage: - with await stream_id_gen.get_next(n) as stream_ids: + async with stream_id_gen.get_next(n) as stream_ids: # ... persist events ... """ with self._lock: @@ -140,7 +140,7 @@ class StreamIdGenerator: for next_id in next_ids: self._unfinished_ids.append(next_id) - @contextlib.contextmanager + @contextmanager def manager(): try: yield next_ids @@ -149,7 +149,7 @@ class StreamIdGenerator: for next_id in next_ids: self._unfinished_ids.remove(next_id) - return manager() + return _AsyncCtxManagerWrapper(manager()) def get_current_token(self): """Returns the maximum stream id such that all stream ids less than or @@ -282,59 +282,23 @@ class MultiWriterIdGenerator: def _load_next_mult_id_txn(self, txn, n: int) -> List[int]: return self._sequence_gen.get_next_mult_txn(txn, n) - async def get_next(self): + def get_next(self): """ Usage: - with await stream_id_gen.get_next() as stream_id: + async with stream_id_gen.get_next() as stream_id: # ... persist event ... """ - next_id = await self._db.runInteraction("_load_next_id", self._load_next_id_txn) - # Assert the fetched ID is actually greater than what we currently - # believe the ID to be. If not, then the sequence and table have got - # out of sync somehow. 
- with self._lock: - assert self._current_positions.get(self._instance_name, 0) < next_id + return _MultiWriterCtxManager(self) - self._unfinished_ids.add(next_id) - - @contextlib.contextmanager - def manager(): - try: - # Multiply by the return factor so that the ID has correct sign. - yield self._return_factor * next_id - finally: - self._mark_id_as_finished(next_id) - - return manager() - - async def get_next_mult(self, n: int): + def get_next_mult(self, n: int): """ Usage: - with await stream_id_gen.get_next_mult(5) as stream_ids: + async with stream_id_gen.get_next_mult(5) as stream_ids: # ... persist events ... """ - next_ids = await self._db.runInteraction( - "_load_next_mult_id", self._load_next_mult_id_txn, n - ) - # Assert the fetched ID is actually greater than any ID we've already - # seen. If not, then the sequence and table have got out of sync - # somehow. - with self._lock: - assert max(self._current_positions.values(), default=0) < min(next_ids) - - self._unfinished_ids.update(next_ids) - - @contextlib.contextmanager - def manager(): - try: - yield [self._return_factor * i for i in next_ids] - finally: - for i in next_ids: - self._mark_id_as_finished(i) - - return manager() + return _MultiWriterCtxManager(self, n) def get_next_txn(self, txn: LoggingTransaction): """ @@ -482,3 +446,61 @@ class MultiWriterIdGenerator: # There was a gap in seen positions, so there is nothing more to # do. break + + +@attr.s(slots=True) +class _AsyncCtxManagerWrapper: + """Helper class to convert a plain context manager to an async one. + + This is mainly useful if you have a plain context manager but the interface + requires an async one. + """ + + inner = attr.ib() + + async def __aenter__(self): + return self.inner.__enter__() + + async def __aexit__(self, exc_type, exc, tb): + return self.inner.__exit__(exc_type, exc, tb) + + +@attr.s(slots=True) +class _MultiWriterCtxManager: + """Async context manager returned by MultiWriterIdGenerator + """ + + id_gen = attr.ib(type=MultiWriterIdGenerator) + multiple_ids = attr.ib(type=Optional[int], default=None) + stream_ids = attr.ib(type=List[int], factory=list) + + async def __aenter__(self) -> Union[int, List[int]]: + self.stream_ids = await self.id_gen._db.runInteraction( + "_load_next_mult_id", + self.id_gen._load_next_mult_id_txn, + self.multiple_ids or 1, + ) + + # Assert the fetched ID is actually greater than any ID we've already + # seen. If not, then the sequence and table have got out of sync + # somehow. + with self.id_gen._lock: + assert max(self.id_gen._current_positions.values(), default=0) < min( + self.stream_ids + ) + + self.id_gen._unfinished_ids.update(self.stream_ids) + + if self.multiple_ids is None: + return self.stream_ids[0] * self.id_gen._return_factor + else: + return [i * self.id_gen._return_factor for i in self.stream_ids] + + async def __aexit__(self, exc_type, exc, tb): + for i in self.stream_ids: + self.id_gen._mark_id_as_finished(i) + + if exc_type is not None: + return False + + return False diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 20636fc400..fb8f5bc255 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -111,7 +111,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # advanced after we leave the context manager. 
async def _get_next_async(): - with await id_gen.get_next() as stream_id: + async with id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) self.assertEqual(id_gen.get_positions(), {"master": 7}) @@ -139,10 +139,10 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): ctx3 = self.get_success(id_gen.get_next()) ctx4 = self.get_success(id_gen.get_next()) - s1 = ctx1.__enter__() - s2 = ctx2.__enter__() - s3 = ctx3.__enter__() - s4 = ctx4.__enter__() + s1 = self.get_success(ctx1.__aenter__()) + s2 = self.get_success(ctx2.__aenter__()) + s3 = self.get_success(ctx3.__aenter__()) + s4 = self.get_success(ctx4.__aenter__()) self.assertEqual(s1, 8) self.assertEqual(s2, 9) @@ -152,22 +152,22 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"master": 7}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) - ctx2.__exit__(None, None, None) + self.get_success(ctx2.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 7}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) - ctx1.__exit__(None, None, None) + self.get_success(ctx1.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 9}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) - ctx4.__exit__(None, None, None) + self.get_success(ctx4.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 9}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 9) - ctx3.__exit__(None, None, None) + self.get_success(ctx3.__aexit__(None, None, None)) self.assertEqual(id_gen.get_positions(), {"master": 11}) self.assertEqual(id_gen.get_current_token_for_writer("master"), 11) @@ -190,7 +190,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # advanced after we leave the context manager. 
async def _get_next_async(): - with await first_id_gen.get_next() as stream_id: + async with first_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 8) self.assertEqual( @@ -208,7 +208,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # stream ID async def _get_next_async(): - with await second_id_gen.get_next() as stream_id: + async with second_id_gen.get_next() as stream_id: self.assertEqual(stream_id, 9) self.assertEqual( @@ -305,9 +305,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) self.assertEqual(id_gen.get_persisted_upto_position(), 3) - with self.get_success(id_gen.get_next()) as stream_id: - self.assertEqual(stream_id, 6) - self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + async def _get_next_async(): + async with id_gen.get_next() as stream_id: + self.assertEqual(stream_id, 6) + self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + self.get_success(_get_next_async()) self.assertEqual(id_gen.get_persisted_upto_position(), 6) @@ -373,16 +377,22 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """ id_gen = self._create_id_generator() - with self.get_success(id_gen.get_next()) as stream_id: - self._insert_row("master", stream_id) + async def _get_next_async(): + async with id_gen.get_next() as stream_id: + self._insert_row("master", stream_id) + + self.get_success(_get_next_async()) self.assertEqual(id_gen.get_positions(), {"master": -1}) self.assertEqual(id_gen.get_current_token_for_writer("master"), -1) self.assertEqual(id_gen.get_persisted_upto_position(), -1) - with self.get_success(id_gen.get_next_mult(3)) as stream_ids: - for stream_id in stream_ids: - self._insert_row("master", stream_id) + async def _get_next_async2(): + async with id_gen.get_next_mult(3) as stream_ids: + for stream_id in stream_ids: + self._insert_row("master", stream_id) + + self.get_success(_get_next_async2()) self.assertEqual(id_gen.get_positions(), {"master": -4}) self.assertEqual(id_gen.get_current_token_for_writer("master"), -4) @@ -402,18 +412,24 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): id_gen_1 = self._create_id_generator("first") id_gen_2 = self._create_id_generator("second") - with self.get_success(id_gen_1.get_next()) as stream_id: - self._insert_row("first", stream_id) - id_gen_2.advance("first", stream_id) + async def _get_next_async(): + async with id_gen_1.get_next() as stream_id: + self._insert_row("first", stream_id) + id_gen_2.advance("first", stream_id) + + self.get_success(_get_next_async()) self.assertEqual(id_gen_1.get_positions(), {"first": -1}) self.assertEqual(id_gen_2.get_positions(), {"first": -1}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -1) self.assertEqual(id_gen_2.get_persisted_upto_position(), -1) - with self.get_success(id_gen_2.get_next()) as stream_id: - self._insert_row("second", stream_id) - id_gen_1.advance("second", stream_id) + async def _get_next_async2(): + async with id_gen_2.get_next() as stream_id: + self._insert_row("second", stream_id) + id_gen_1.advance("second", stream_id) + + self.get_success(_get_next_async2()) self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -2}) From 302dc89f6a16f69e076943cb0a9b94f1e41741f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Sep 2020 16:42:14 +0100 Subject: [PATCH 034/134] Fix bug which 
 caused failure on join with malformed membership events (#8385)

---
 changelog.d/8385.bugfix                  |  1 +
 synapse/storage/databases/main/events.py | 10 +++++++---
 2 files changed, 8 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/8385.bugfix

diff --git a/changelog.d/8385.bugfix b/changelog.d/8385.bugfix
new file mode 100644
index 0000000000..c42502a8e0
--- /dev/null
+++ b/changelog.d/8385.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite.

diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py
index 7723d82496..18def01f50 100644
--- a/synapse/storage/databases/main/events.py
+++ b/synapse/storage/databases/main/events.py
@@ -17,7 +17,7 @@ import itertools
 import logging
 from collections import OrderedDict, namedtuple
-from typing import TYPE_CHECKING, Dict, Iterable, List, Set, Tuple
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple
 
 import attr
 from prometheus_client import Counter
@@ -1108,6 +1108,10 @@ class PersistEventsStore:
     def _store_room_members_txn(self, txn, events, backfilled):
         """Store a room member in the database.
         """
+
+        def str_or_none(val: Any) -> Optional[str]:
+            return val if isinstance(val, str) else None
+
         self.db_pool.simple_insert_many_txn(
             txn,
             table="room_memberships",
@@ -1118,8 +1122,8 @@ class PersistEventsStore:
                     "sender": event.user_id,
                     "room_id": event.room_id,
                     "membership": event.membership,
-                    "display_name": event.content.get("displayname", None),
-                    "avatar_url": event.content.get("avatar_url", None),
+                    "display_name": str_or_none(event.content.get("displayname")),
+                    "avatar_url": str_or_none(event.content.get("avatar_url")),
                 }
                 for event in events
             ],

From 91c60f304256c08e8aff53ed13d5b282057277d6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 23 Sep 2020 16:42:44 +0100
Subject: [PATCH 035/134] Improve logging of state resolution (#8371)

I'd like to get a better insight into what we are doing with respect to
state res. The list of state groups we are resolving across should be
short (if it isn't, that's a massive problem in itself), so it should be
fine to log it in its entirety.

I've done some grepping and found approximately zero cases in which the
"shortcut" code delivered the result, so I've ripped that out too.
---
 changelog.d/8371.misc     |  1 +
 synapse/state/__init__.py | 64 ++++++++++-----------------------------
 2 files changed, 17 insertions(+), 48 deletions(-)
 create mode 100644 changelog.d/8371.misc

diff --git a/changelog.d/8371.misc b/changelog.d/8371.misc
new file mode 100644
index 0000000000..6a54a9496a
--- /dev/null
+++ b/changelog.d/8371.misc
@@ -0,0 +1 @@
+Improve logging of state resolution.
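For illustration, a small runnable sketch of the logging change (the room ID and group numbers below are made-up values): the call added in the diff below logs the whole list of state groups instead of just a count, which is safe precisely because the list is expected to be short.

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("synapse.state")

# Hypothetical example values; in Synapse these come from the resolver.
room_id = "!abc:example.com"
group_names = frozenset({101, 102, 103})

# Mirrors the log call in the diff below, replacing the old
# "... with %d groups" count with the full list.
logger.info("Resolving state for %s with groups %s", room_id, list(group_names))
```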
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 56d6afb863..5a5ea39e01 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -25,7 +25,6 @@ from typing import ( Sequence, Set, Union, - cast, overload, ) @@ -42,7 +41,7 @@ from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo -from synapse.types import Collection, MutableStateMap, StateMap +from synapse.types import Collection, StateMap from synapse.util import Clock from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache @@ -472,10 +471,9 @@ class StateResolutionHandler: def __init__(self, hs): self.clock = hs.get_clock() - # dict of set of event_ids -> _StateCacheEntry. - self._state_cache = None self.resolve_linearizer = Linearizer(name="state_resolve_lock") + # dict of set of event_ids -> _StateCacheEntry. self._state_cache = ExpiringCache( cache_name="state_cache", clock=self.clock, @@ -519,57 +517,28 @@ class StateResolutionHandler: Returns: The resolved state """ - logger.debug("resolve_state_groups state_groups %s", state_groups_ids.keys()) - group_names = frozenset(state_groups_ids.keys()) with (await self.resolve_linearizer.queue(group_names)): - if self._state_cache is not None: - cache = self._state_cache.get(group_names, None) - if cache: - return cache + cache = self._state_cache.get(group_names, None) + if cache: + return cache logger.info( - "Resolving state for %s with %d groups", room_id, len(state_groups_ids) + "Resolving state for %s with groups %s", room_id, list(group_names), ) state_groups_histogram.observe(len(state_groups_ids)) - # start by assuming we won't have any conflicted state, and build up the new - # state map by iterating through the state groups. If we discover a conflict, - # we give up and instead use `resolve_events_with_store`. - # - # XXX: is this actually worthwhile, or should we just let - # resolve_events_with_store do it? - new_state = {} # type: MutableStateMap[str] - conflicted_state = False - for st in state_groups_ids.values(): - for key, e_id in st.items(): - if key in new_state: - conflicted_state = True - break - new_state[key] = e_id - if conflicted_state: - break - - if conflicted_state: - logger.info("Resolving conflicted state for %r", room_id) - with Measure(self.clock, "state._resolve_events"): - # resolve_events_with_store returns a StateMap, but we can - # treat it as a MutableStateMap as it is above. It isn't - # actually mutated anymore (and is frozen in - # _make_state_cache_entry below). - new_state = cast( - MutableStateMap, - await resolve_events_with_store( - self.clock, - room_id, - room_version, - list(state_groups_ids.values()), - event_map=event_map, - state_res_store=state_res_store, - ), - ) + with Measure(self.clock, "state._resolve_events"): + new_state = await resolve_events_with_store( + self.clock, + room_id, + room_version, + list(state_groups_ids.values()), + event_map=event_map, + state_res_store=state_res_store, + ) # if the new state matches any of the input state groups, we can # use that state group again. 
Otherwise we will generate a state_id @@ -579,8 +548,7 @@ class StateResolutionHandler: with Measure(self.clock, "state.create_group_ids"): cache = _make_state_cache_entry(new_state, state_groups_ids) - if self._state_cache is not None: - self._state_cache[group_names] = cache + self._state_cache[group_names] = cache return cache From 2983049a77557512519f3856fc88e3bc5f1915ed Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 23 Sep 2020 18:18:43 +0100 Subject: [PATCH 036/134] Factor out `_send_dummy_event_for_room` (#8370) this makes it possible to use from the manhole, and seems cleaner anyway. --- changelog.d/8370.misc | 1 + synapse/handlers/message.py | 102 +++++++++++++++++++----------------- 2 files changed, 55 insertions(+), 48 deletions(-) create mode 100644 changelog.d/8370.misc diff --git a/changelog.d/8370.misc b/changelog.d/8370.misc new file mode 100644 index 0000000000..1aaac1e0bf --- /dev/null +++ b/changelog.d/8370.misc @@ -0,0 +1 @@ +Factor out a `_send_dummy_event_for_room` method. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index a8fe5cf4e2..6ee559fd1d 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1182,54 +1182,7 @@ class EventCreationHandler: ) for room_id in room_ids: - # For each room we need to find a joined member we can use to send - # the dummy event with. - - latest_event_ids = await self.store.get_prev_events_for_room(room_id) - - members = await self.state.get_current_users_in_room( - room_id, latest_event_ids=latest_event_ids - ) - dummy_event_sent = False - for user_id in members: - if not self.hs.is_mine_id(user_id): - continue - requester = create_requester(user_id) - try: - event, context = await self.create_event( - requester, - { - "type": "org.matrix.dummy_event", - "content": {}, - "room_id": room_id, - "sender": user_id, - }, - prev_event_ids=latest_event_ids, - ) - - event.internal_metadata.proactively_send = False - - # Since this is a dummy-event it is OK if it is sent by a - # shadow-banned user. - await self.send_nonmember_event( - requester, - event, - context, - ratelimit=False, - ignore_shadow_ban=True, - ) - dummy_event_sent = True - break - except ConsentNotGivenError: - logger.info( - "Failed to send dummy event into room %s for user %s due to " - "lack of consent. Will try another user" % (room_id, user_id) - ) - except AuthError: - logger.info( - "Failed to send dummy event into room %s for user %s due to " - "lack of power. Will try another user" % (room_id, user_id) - ) + dummy_event_sent = await self._send_dummy_event_for_room(room_id) if not dummy_event_sent: # Did not find a valid user in the room, so remove from future attempts @@ -1242,6 +1195,59 @@ class EventCreationHandler: now = self.clock.time_msec() self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now + async def _send_dummy_event_for_room(self, room_id: str) -> bool: + """Attempt to send a dummy event for the given room. + + Args: + room_id: room to try to send an event from + + Returns: + True if a dummy event was successfully sent. False if no user was able + to send an event. + """ + + # For each room we need to find a joined member we can use to send + # the dummy event with. 
+        latest_event_ids = await self.store.get_prev_events_for_room(room_id)
+        members = await self.state.get_current_users_in_room(
+            room_id, latest_event_ids=latest_event_ids
+        )
+        for user_id in members:
+            if not self.hs.is_mine_id(user_id):
+                continue
+            requester = create_requester(user_id)
+            try:
+                event, context = await self.create_event(
+                    requester,
+                    {
+                        "type": "org.matrix.dummy_event",
+                        "content": {},
+                        "room_id": room_id,
+                        "sender": user_id,
+                    },
+                    prev_event_ids=latest_event_ids,
+                )
+
+                event.internal_metadata.proactively_send = False
+
+                # Since this is a dummy-event it is OK if it is sent by a
+                # shadow-banned user.
+                await self.send_nonmember_event(
+                    requester, event, context, ratelimit=False, ignore_shadow_ban=True,
+                )
+                return True
+            except ConsentNotGivenError:
+                logger.info(
+                    "Failed to send dummy event into room %s for user %s due to "
+                    "lack of consent. Will try another user" % (room_id, user_id)
+                )
+            except AuthError:
+                logger.info(
+                    "Failed to send dummy event into room %s for user %s due to "
+                    "lack of power. Will try another user" % (room_id, user_id)
+                )
+        return False
+
     def _expire_rooms_to_exclude_from_dummy_event_insertion(self):
         expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
         to_expire = set()

From 13099ae4311436b82ae47ca252cac1fa7fa58cc6 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 24 Sep 2020 08:13:55 -0400
Subject: [PATCH 037/134] Mark the shadow_banned column as boolean in
 synapse_port_db. (#8386)

---
 .buildkite/test_db.db   | Bin 18825216 -> 19279872 bytes
 changelog.d/8386.bugfix |   1 +
 scripts/synapse_port_db |   1 +
 3 files changed, 2 insertions(+)
 create mode 100644 changelog.d/8386.bugfix

diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db
index f20567ba73e97bf2568a9577efb0f132d66c429c..361369a581771bed36692a848aa396df96ad59d9 100644
GIT binary patch
delta 168113
[binary delta data omitted]
zJCJ{#FXuC)p}epD1otCcrVJmsA-l>3HxDak(BE!b9U)JcRrQ1l75g1eLcg|Fgv(R& ztFHb(!oCAMisJqM_WGq>0@6zgJ%NO#bP_s9Nl@v8P^5{FP^G3qXoiH6K@cv0f*>U* za)Lkt2m(?P6%-H%9YG;2xz5iEGKO3Aww}WIt;4!c zgEL}|W;kPx&fVT?to}o2f$=_rXpk zbgC8+w-9yPA0@~-&@~rz`}e{dFy>_{r)wsKmKIv)rlOdwq{udM52II**F*r6lZo0u zm=FRW^c?|Qo($JO4it==!8zU*KvDf^?f^I%3t;f=F)Gm7i*q!R996yC0dO>w9Ahwu z+-eZDZ#3sX8YNeVa);~59+VGB6WQCx5I zw=k1%AnLomuyZ1!43c}ydwI`dzJpkM3lVuFXnrHsC1`#M{&t{Sn@rdVaXklxq1ZUiYiGi zWym(QVKI4E-|~7h(D>LUPht0~Gr9rmy{QD@tg|}>vcfu}Ls+M;QQX$Bbs)=Z`ZSc? zSP%!}C3NNN5*6QBh+>efoSju&)_y#@O~gs1hJv>l&=#9ZU^D!@Lm}RaErKEDswh^+Fgc8fI78l4C9R#>1GY+B1JHZObi@u9cj64jR zXw^eNzJ**;Xs;loZYYzID&k;Cf8J#89#`xqn?}XuM{yt!3~v-tqw2Ka;=_L&RNrC`~U~6)Vm2FDx68bTOYDcPKDWVM zpAgjpo<03h?COPLVKxlogHY7#mXf8ulA|EEoMMNQA5;PVZYqgoUz3o$mAKw*#I=gJ ziU;8St{Q;iT`!*Iv75Hs9LF!N-M)%*S-)raIq7O_1i(E0t^tRQcr z|70~4mT)o49R`I;xouf-!tIO%WE5=6!Y>=Y#rQ43Zz+Du@XKKZ+gLllFx|(B?ts67 z0GfT3F|2eR>+X!tV^*Wy&&a2S&VxHxv`tPFh5v}7*w?1i&6vLrE607y@%s$F75J@m z&foX>wCDjRBTs_(`xD`P z)>k9IcZmqrbQYg#Y&mZ_lT7%FR0;S#5kc(7i{z^?VL}#s9uhuP3Gkib60WIQviLsp z@%K4alYs9Pm*6o2Vw*Q)suJ+sB7y*qnGnkiSf~=<`$Yt9GoqIZ0}wcTp{gjucZ>+M znGyTBg4REL&p4)ia0*_5xqms086P(yaD8}+zaMR9FDq*j@R9Q*$a@V5e*G4x&EvC2 z1g$grA7d63f_dr1eCp)su93Rfe1XOuIytM0h|?xz;mX`95XZtm8Ar2W*WsXbgyP+ zs6z)A2Hu25KvP#XIb^nanQ&#`1gQi1AAZ`Jf^ZNGAG5xqIRas zz4J+_aCIO#o)cR(FHANxF%>QkBuAUz3K7ZQq-rAI{6HeK=`M3TN|7FP_EmH2tLNAq zbL_c0?Q4|C=EkwQ(g?&0FFJ3(HGWNf4*N9pKIPrY>o>14winhMtI4v`($IVmfv`>> zcCyER&K6N?cIXYh?^sy0z1hz+1?j-NPOclRH;p1Y>}Xbea@``zDnp@-ZG#uan?H=L ziD1RSQ`U87g>8!boVAXuJEGV7zO$idz+lDsqVpQlLgCNJX>V}N0Wp`3n=zN+$-QiEj`H|MDhyI3ct!sCO+OPlD6@#;U$ zHRN#p;6sjSKl5#vBJSwKRQPoeqigzaUx7QC+bJzbguX~hg@Xsb=h`|Tre&(I*Gx=> zj|XwPmI8U#k`OGSDQ@?;&r`~(A_9a6*TMi~stCIfp{)&aR-*mL*jZZp{*qZ;BXEM} zWs}rL&QA*PSSpC?FY!y(o)=#s#&hAo*7uvGj)aep<61zJP)~BZO-B|ZmZZt?7IIw6 z2~PIs-kEBDykvhomK7}Oi>WqNa&e|6$0qyZvB+TC-k+}a$13FPR~H*XMBc{fnh3a- z(4Eni9_+$#jT_!2{JwfDL4=5??k*7~DFO=;cJaNbBG&nYR3oapv6P{ogo*`F@f>w1 z#_XP|-ByEOb}i~b)laK_Uq|pt zAp&@aHOsj;RoVzs;FBV2yRb`{Q{}5E1uiL6t@)3!;+uo)Y(#@8D#6S*2*|;V^;a7a3{5ni6yAX>WEAMCiZ0Lnwei3w!TWFT(Ox&ahS{#z;;nyL`&s_5yl?)+ z9Ai3e>S#PZ$gtWQ4n~#*0C0W6Vx%8Ae(S^v186ZCaT7;l<`CWo1pZyL*Rb+ z)v>K-K_Q~3q$COvE{fusc+{HIQe`M92}DueO-9zfzIN9A$p_(nGWko&I8Pz9*oBKO z{;`7J<=Y1rz83z1HAR>5F~P_j14llVnUbh=Y>bfUVk4J8Hnx&>@gE6-M9np@@Jgb1 z&LMR)fh=~u>h>3OJ+Xy)$)g0L%U7`Q4sJ~T^+a`8M|u*}V%=|`U&73q!cEoa@7FDa ze_)RHI84yyC#rF2QlzO`BVgom$i^`k0p6WO`6aatdS z5(WC8`6w3qpcCDq$7-FB#%dP@c`;&qKh|V$lXY3)&M30(_(Def+;N%=X|(oqcx3m| zTr#-Tx~!XHV{)glNY|DAu_i;Btvy{I+06WL>R5BDHTe$IfyRZrD#-nz<0Etba3wo05&`#z^lyu7A$%QETTL4dxjhmfzGV26#(;QO_CIer*-SVEM!i zWrZEqer527r#z(srkBSxb8PBB!-uh~_}r3k)_jvA#CfFC+G}Q^s!{VK?94j8L&?|? 
zyVkiQc3rC;mw$=!&Y> zOBLQku+Z=u5zt>$tyJs)(vc z{oQ|cy%bAf6=g8TruBJUL~5wkr)W#dP@@GG%YKItI@;EYMq0?@xb>ou-1V}N$|R8Y zJvIq0Rulv@wUX9IO507ck&0R)WP25j4BsFcS-wFu^0Pd?+8`Q<_)6e|zv81;FSWEL zIQXy8QgA6vFa_N~a6cMmil!=txv0azP@|PDrRGM_Nu0z7|aLU7^k2B?Qt2|zkNBdSm7mcGI zxwlP5BiuDk&;5Vs#zm$~(pui{lc4^woRO@SZQ^2@ZCf{l#&h*HF4B2w+qwpJZ}SPA zuX0l7+svmQi!4}I#!EL&{l|g|>s4jJh4rc#%_o#WWW4FGI8W|h_u6RiIc9S1FWfL+ zq1-G+*6;W_c%rU_C+axX{KUEu*#4}KBZhwZoWeB3@h)8Vs;bfkn-bQIU}H;ks)Ey>B4f{`t>uv&j#p*U1?1be(g=(=S#mxA^5?`7=R>bS7W6Hch}8 z(kjLoYdR-neHn^WD>}q~E!}Vg^+CyrPk_ER_e-Zacm4CKMq?L>#ayty32R-jzSCRf z_yojZQs5M%7eb;;YLHu)pJ3cg4Gk0h6C{*NS=)Bk@-E?Jy?MQz$KgzY20Ou(A^ zk10_9jYjP6Q$uG5vhHV5yvvIR$Kc~b#nvAI&D_^Fklm$X+L#Gda<=rsVw*}?1inrM zEibNG(Y^{Rc!KFFFW#kM)Rn{l)_N;ckCQ(}62#nI&;nmP2i~8wB1fxh#ZRvegX$6+ zj;;=aM|JN+Y?v7f&X_4fp`St=a<-d_xxE0CqqF#BU9q=l9Mxrv!Plxk_ALycVxBVr zj$`N1))p#W8lo1fuWFBNE&mLHT76zzD67=cv?`a_cU!)VT5?$ZoKyUK<#Rjs->QK~om>pLoXGU2ObDibP23PzvT>AWt0B- zalf$b7t2&)1=y9qD_GtV>i)WZ(3vj+UMMc!zN%pZ=HtM{@0|<&+;yV@u>Wc(ZqrG& zeMwgsC2GNV;WrQ1rCR9HAUeycg==kV| z6mQsB{FO0kWIbiL&du8Uw(nKnEZ+}(1AX#+#`u)?K8)bpL0%`krg_!0{bozC)q^YP zTx(;?70V)|mH!J7Q`(wJOsh;?j8BbU7-JEn;A=x4B=r79KS=MR>{5nzPnlwbwKO<8 zy0l_k7GS?_jY-Aoj`k3!`yjdD3~M{SufW53lx&Vz-4WpCG}ivB8Gt2C$RH!j)ZFBVZrK!fHnPLC*VNgrW%|Bixv*7^g)OkBtB(%B?~!# zwF&{z2AHKeP#7s1#}b8r``7VG;6U<2!GVdRE4a+d(UPuY`$R6FuK(`rU&GM6`K5o7-vC{VqEmc& z`_%IO&-7bzw~erFw&hr7dXKSo_ikye>g{JK@w#N$Z&~7%XZg@8#qy5D>ebVH)+^M! z$@a)R!*!UR&4`k)lxp!eRld}__vdx3!zPJ zT}$sXM|!B18rD+{_PIbf+nt`IIor+PH58p)b-$Y`ob5snYc-$nn!;*Yc(EzGBG@)l z^~amc)-0P=OM3JPKg{jVtuUU|ML{x7<6?mTbcOljy{nMR>&^IHH-gcTD?1 zP+eZ`qjqEnOI$UrF$>)U4|}vJBLpUH`c##@dpY0<=9!u&>vYk2hjcYS;4EH|GOx3C zNCU@q<{A^l%{*Z}|%Evu_XUt}$xTfKzuCwkZ zy)w|2sXU~0k>NW-nxUulFh$4l>Ect)`-XRnS1%SGxn;aw*^dlNTi!LW;y?dsz)o4Z zMLIh-+EUlX5;BLq!`fH55T0?i8&3k-Ak6&#($r-5>=1!#qwu0;mY<4M5h?=2ROQWb6Z3=x5AM=_t# z-*yq;bL2@tTaMYQIhur?g21))n9uq*XJ`bmZxZlTKn2@qwGT0Zz;ll>JL)}JQ+)Oy zin8p!x>=d!`Ha?d!`-EP70kg-W_Bu5YrmVoJ$5v+u@M^HRp7Lnpg>ffAK%1V~s7J%pMmvgX&&|&)e7n|Y z2SMPu37UQSpC+NbAn@D_&F+0WL~Wv-AaHGp&V4mZeInZm%(X8%s3rC}*W0Z%-{wt% ztf52HYmOF}Yx8t2F%R0MwH);(0qw44%|~hktp$N=n{~-Etnqt1|3DqJR&NsUeb}e= zX%borf<(EozZUb~wp6|F7M_^c^v__qK2s@`28{lJ9n!^tPScRoq0uViZUtpVS3?&}ICU6l_PoYf>>ju1o++*qj!^j%vl$uYgvl2ehEI4NjNC zfqtYFXB~#9J-k?dS|CgKH9447SdE5C&LtyO-A_QbM1VJIwE;1x{-}bE*{NgoSiwgV zg7w%g&2!NOjll>-EbSW1u2;{1O5w^Zs5Qvo9v6x+;l+Y;RUpgJ6{GvkH0U2rL;d5s zL@kcH1aun_CtooHviR#*C&dvOG!_GN>e{eSXh6`d`0K+0S;Uzs!EEP{300Z>`mg|& z+-zDfyEBXitNFB2Ebq$HVAkS8QEbFMrL%&D-5~UfsUhs5R^0bHda!3PRAKi*yWw_U z;2nZeX3i-owF`9~bcZM}&KhRm$`%sS&pw~T_llT43yj@DJzwZiNBi$~Y|33e8 z|C)YxScRC~@izS^`HuxhHH~7Ye)!koEQr~C&c}0GimAB?$+TJhW_sB6vAZIp#-QEK z8iRf~pJz1m)I~9qeb1Qk%y3J2gxF14hMiW$kdA0mK|Bc6=g#@|J#c0*Z4wIAFNgmY z?)~T(5jWo6*H_W|UT`9k!{;Va)i%iRZ?zAX8 zw7>;J%FeeAu{N{ZMgaE}mhqM@mJstZ*uFQh^n!vKCQNz0GqAz&LGL@EN*<}_p@WxQ z=o{?V)`UU%ndzMAE3e)9 ze<*UHDNuRf^*6;SG!~l^%&je>Elu!FZHTypGvu6Xhqv~Rt;jtXr`J~#!5OMQnjXd$ zt~t0|kGC7&@ME%m5ys}MJ=jQZIY~8L)31jX)3e0HN=Ma8}@%zcyuFsLZ8!Xl?y6Z-xj^!!gRlcinbfCg!D@S9D`@KK( z=2_qAR{V@8&Xm&LYCPJ;Xz>0}$0*DUyP4d;kDXlDAkewcdi1QG1toSJASz>0QF+<> zXwXEXz7NG&Og`F?bx%IpOTkhnr5vK1QmRGg@#Lcm#?h5ck0njpZe|7)kBmJ z-A9IAN@d-#WmOtBs4o(Wsu*Lv))1axxppa_6lN30q zbp&ufWq|}rK0Cw#_~lTqwCH$@g4-#LSWJ?CAM~iUfVUqZu^EAscCqy(k>8JHO;ostX`^EH_UBbq#oUXC}C|?zJbEH-hMJ+=W)m0RAo;j%qs2~7W z7OJSPw+0YUUI3mMs0b)0#cMgID47%iWd-0$A4Peg2q+^(d1Qhj;4KMw-5#S{NSCCW+ zaY}$q0Et>?(*y)2CBQ00X#q(ooG1Yn0l315=Is@LSpcpGpz(;`NgO8Gh8DMzN8J>f z%%Ec##-N{_#-QV`9Y)1wEbX+|(+BAl#gPq25fGJaoL3mo-=SS|6jc)na@0KWgFSx6m)yZ=&BwzX8tfLFqdc*1dRf7iY{b z$FG``yg9eXh+uO&x4NBM+|JEz=O(vvquaT`?OgA6uDG4+#Cg)*k$>ux*DgSL<#ztk 
z?fiw?`E$4PXKv^Jxt;&(cK+1u{E6H7WA7vyJAU=bBRAlo+xY{x^ZRb+C2r^U+|KX1 zo&V!@{{=3`x8P|CY<+Q7y{N{H4tK0c6tkI?8$DIjT zyY{G$euJ}k^yd^GpHqBdPVvb(#i!;J7w#-BQVNVpHcbn1e@S9H7clpX$943beA0B_3=XYfDjRrzLq9CQ@z! z9w{xTH5U6@U$}V~Hk0dZkMsp>Yu&+OK|U&_sVTNTbWc2w!L&gm1KEp1gX((2?=y)u zW0Ka%BgFyRY#F4oz9U$9u7kZl4G3hXF2<|t{I>}!y^P9?8?8e3G4Z;-}M)2kgiIj+;$w`dHv1S7`r%X(; zxC&6s(!fe?^jB3jm}e2@nW};9deBep8s=M^*&}BI8`QMFN`QG65qKtVfU}@RfC&~6 zxUx9JZ-I|T%OQPKoem~iMBtgufo0zBr8a?C7ZG^obzrL=X#|*ki6-0=Jg`Bd`lu2x z3nK#0Ob=|YRU^P`jQ7DkH1jiR8U#LSaKjI9mkLF+Yz6{cy#?3qA<)%#dVA4TChiCJWl_!JM- zn2sN)*NnLvF}t!%j`#job>dz`< zb*;%-+gAiuyJcdg4#vmuczDmKH*7b=8~l-$VH|=IoMA?#S$74Yowf?{AS6)ToQ@!s~&8nH2SmbH6TARG5M8QQ1vC?6X74x1aH)4(d23>Im^jmjoVGiH_VKSU~UI3U{86;7I-U?u|`lH>Wl_5UnDMFst zs{@Nn#VSB!dSVM4FOH#N{1R-LOCogiT@v~LcVypyRnSc}V+twZ$V$!P2144Fs*sl4 z0n)ajs8^Z@&0OvZ_|5pz_!YK=e(4p*-eIxn(7f2&&9t*i7s`Y(zfvIJTLACIK~`KT z$l@FRMDZ#H$o^m=6o=^(LJ|F#bC*0vG_zAw(LjouJ)t4ndjL(Q8|IcE;Ef$giK-%7{s*qj>B z#>FHda8>3pppB_$zYU!dFaA!&f{wVPh-w&~X*D6Vm{4A$(!1^QlT>k6qtv>~YYKsRjC|M;J?bO!Z34&iLPys3fl~x~{ta z6ob=imSLV(4?{n%Amc0Bed8(H0b{OhiE+Gbn6a&`v9YxBo&JR~M}NZSvURG@H`Xpb zY1WE9?_2)%sczZp{lv1!`zK4h_i{^p?@@?;+06W_x3~EVuRqO`l-}kJhA?wk>sO{* zetk{5{OXt%_&zrc@I7v->$@skx#l}oUrMQ^Z=?H9zuLF8KEcn<;-TVJ z%cj*c>3JLX@2U}%*n5koz0KZBnjWkgPq(uAD_|Qu0n;^$S~9H>3&n;aX$bs*b5nnW z&5te5R?X)rvt-;T)--!s6ZXZjc``pAi(CP_=+Mi~TuCyJ?$gNK_|^uh}V=`Fj(W5Zrl?)~k)YhQ>*4QlZOQ*@7{c=!vb$xijyTz^j9Z#(l&8-qfc;WMi&QU+QT%PGq$%SV=`$l!U} zyw14I+}iAGI&YlG>J*;trB@z0XBD14s~Aih_1^{9L-is8?q}uBhp{ccoqnM=Ckp-O z2h%5pGoLf3Kh#@46g5L^Yr>tsojL8Z8*b(G(#RPde%f&2Pb`xa>W5tJ5ox~CO&dXD z8_X4(2qMzFwaTJ9lS7TPWr0n}KEt;se1#(-2v=w4)Dxe@XSjcVtHpw1oGh$#a&S8BCJ?S|+ceVvaoNmLu92mm}I-1As;?l|llGQ46SFhmTsl z)Q_|}3SUMeSub_8Ru_9i;n9RsM`=|>(~pQawK+^&+D}Y0^B)t!A&<5dqQ03tek6~d%i{@od?k+& zD+PVLJZ9s_+wG}dizB6#pCYe0R;!Htuaa6BqgJw%dKfUijiZNFru|(WP(qyf@qcvk^0XRf^#AA&{LbQc&e{K?^Xp63 zg(jfA?x@adpZ8y03%&Yz)v`UY{iyG2TVfk*i?IG|{meSTYET|qiYyMx$E-o}`JQEb z|McDD`-yJ{-_pn+yxk|sryKoaImzdr>#Z-vi{N-d8gvIK=e;a?uY00u&JXW~JLBh_ z&r%dWC-#3=Ht7A<5fsT{SN4j`NXR^o-=EHe%nR88r3@W(o%MTk#=gdIct{r-)*2?V zGPxHJ=PUQZX1$wXCF$bhKxgi{3&ZvpLX9(>miUVyFp@lW+mV$qbJ#`a%wd<(7qrDN zRn>i~gX`lfza@SFzUzIfAnE@Fwu824YreH5toBW@U!$>UyD7rB#TaJT zXsD_GQXi_UQ5v(DtVcr;Fca=MP+#-)nMck&Hui6AeFil)EtPoky)nfflwy_td0$<* zLX%C*o{9Nbu{~B@gF&NB1g;EynE@IGr8Z%nDKgpeQW^o2+eF}*36phu5Ti9Q z%yH$@ELn-2rURe!(DV;bhm#B!1!dMm_>S&sFQ67D0?*W&>|QC20P1lfaOL1ku2&P0 zcuQ*pP?LKSP?Aoz>bGuc6HuJzGCW9Xb(s=JjMNTm;ai zbLnHrf#^{ewNYr&d9_EfQ})P30F62kcqRa4?>^QfK(kH+m#agsL+Mg;4&kykKb82% z6e!pusOy_rVmroM82$VGtMS52N4y;4sa6XjFlcu-zAL&nDh1}c2;4a(=baGNCR!^v>|s?l4vitl$CX0OU45%>noU z6369t;dKu~Dojq1qKyv(pk!Xu0KxIT0D`y75p^Ik3*-xfymY3mUI!oWeOn; zfcLMnl(J)nB#zo7#CMX!8_r1}@4KVC-Hrl?zK^XFT)h2j!O=kg0cVy-AbxBS=V&j0 z#PMg@#segj&HqH+xzC5SHP zzn%O~0zP!aA3XuKX3k&SMi}N{4Y&u8y4ar)bdVG0o!-WYq0uKG`j;Oh zker>*0jTo{2wsk-9voZ>)c90adH?EQ@OTL4eYDPQleV z1=n^KTvsA3%41!W?zxV6m0YjKnw4DdsW-gmoL_SNBSmkR&3G=BU;P`0XUue3KrkNG!G8pZsyWES5j|8tJLsmW=|y7{^Xa?K?nYkOAvnyh8efne6to=^@` zVJL{p2q(qY_Pvc*M2!RR0xX5IWI9XI7hv<=MZ6wXKY~Kl?u}x?{CtGEdw}Aim3so= zJh%@D!wB^sR|;tD&q7!?DkinxCyQ^@f~eOOA#^yMCD+*-$hLL(CX{_&>i|N;org~N z-m+aG%vu}hzDwJ%rE)oneW}=EEVZ#3#phH^SidVU$7c^dR8-0@-W~|;Ex@jIP)z#k zOGtWBKa{Pm1Hr|uN;P7430SmlD|QWsrF&e$o7 zN2r*0(kYAO>!D%$uN$C4C*sjmjOqGKBlZKGkspBod?KiW@9jkL-FEl{42U>TC5Q5= z^3oQRus?Q_EdKNo9XjcS!4$k$!$RtQ>e(3 zjN1PM`U@4uZQP13{=6G~%F9W(F*IXn$xZxjIfs_~wK_>GOOnL0WV-J&r0ad`GtnDS zLcH#HjYbIKk=C2m;n+Yr*j#KLgbdgHjpvMg4SyJV>(A&vKxD(wy1!U_LglKI5vh`8 zTBe zp!encJoVwdhixVQB!R~^#~h2}JQw1sM=Cz?O+4$ZngMLX6BVBz+b6v;=jXW+XBM>; 
z^U)ODcp^a59F2gdOYGOFojS2hd{Y8XuQ>hM2eO9=0>57!BhFjk2^Oux+emzzzylgq zmN?J27;#bU;n+9v!ZNsmd*4RM9x5LbL3y_Qm?nTSGp^bZ749oz@{AE8Toh3k!XTnl zlK2hVp(d1bC39 z$PVT-L%dSo+rY&FWsne{;mm%e6#@_RY$3ac)da1a_D?s<)_Lmrvq85$qOWqASBkQvh8aQj+9TRm ze$Q|obelM%PY@J&V~AWlU5mxMcgOk-gq`O_gcy>Qmc*)`#QSu3JsDEBBz#vO$^`>yQl;O%3>Gv-da@0>gB z!BkUOzi3^!rI*hCl>ch~G5*c`t$t_xzVI8*DkMMX#aM_n#o^kSR7_ugWWvtD7 z;9Q&ea8Q8JdX(I!b05wwWt?emMHD`)>+DzfaD>g^ebG(9WR6WY89t14 z4RFS&a}V)*LT&%B&A_>LIm2}l8tSAO2x21A^G_#K^oRBo&+>6 zW!>8zS0!K#!&}zor!nCN@Fny)q!M5)<4HghSZp)X2(YFhf?)XdvKDQQseD+`P_?Ij zFA}V4En{Uv1fG7rNN{&lm4Fov5xD$)6CUlu4v}AvNF7`nRysuB^83v?cT^oUEOrR< z@a@Ia<>FD58H*kwPJB!-<$x`dE{I zbrTVIIukR?wS%ezte=R$)2WyZyme5%&C;-rLQu9f-|R;&)~4IaGWZtD&0=X-NfEEh zz1V*DU;y*_cBy)SSXOxu8|~1 zHI`UZ?ebL4Yg5p|NJ^#jCp(@(v2O6zwZ}$@r*TS4HnQMpJ%evMUq|pBJ>ouZEO z@>n8|!M$FPu_7`pO0z)}6)X(3TX?F&wAN}r#6DRAOigTOuNT}V^p`wV>Mfe;)?3sk z%kp-Vx#%XE=qmJI^>6nMPsBgsjA9#KGbh$2#>B{hlh1o8S9QZO=cA&K3qNm1rOvd>KhiQbHs$Is>Qq&Uure=#Ti<<9t5N_|8LsOcZsBo|3`L*rJJRQ$&8oEnS!`%+9f*lS3}y9$HYB)5+xDHJM-T16&h>n+Jd4?T zwOr0Lq3*&2kLyu>xe470>RHqXp=!j$jsT@%FAi|c5ax4KmpBpAEv~$c@wIUU5aw|N zr2I^}bJFTY{&R-|n7|Pbyzgq4j*avWbXKywl}r7^R3DK-g5Pd7X57%hV@8gfFtJ&q z28|jwu3s*7tbiK-4-}J%`7Q~It&fc#ysvp5z!|1K3tI8An>8j4rVS2zFA7;v2k9At zT6|4@{b{ghPCj^2jhhebw!(72-<|do^x7QFF5V> z$)Y~0+%w*F7%iwi_?xR;=t|6@xP8@AL1u4sgOiy=ee}9jygp~`Q(kQp)!FCDgsd#A zhaNzLLDVH8*f`adTbU4{aALgxf~STFfR%iEl>>?to|Ytmh|WU1P5}4+>LP*2GXD}s zYLWnyyQ3vgJn;etW(lCkuYm-zhF;?U3=MRhiIWdVAfeZJ4q$0cKy>I~-14)Ju?4BO z0I)LWBQpmpNDc%Y1^`_omgEFnj{JrL{JF(ioa$m7@SouCa%9Z;g;>4yp7LJ0@3kmekvvLui@ zTwE^}&>TpC0v8;-U96d@?*8Qwk~nJ}Mh)VzSmxpzHkLsAuwo8il}td&E<7Ee!9E18)LFiB2TNT7f?q)o&Hp;%&-}#!tZ@mr+yN>#4#eEVpDio|Slkk@{k&?7 z33^)qSk)4sEV&~&^4DJF8+5RoB_NTVkwDak!hC~eECEG7T~%D=4xA#Vc}lQYl2*)rjO-6ZcosPYGk4*Lm;#u^72ve8$TA}m|E-{71ZhL(3_$#{J3$nXiLouoIX>#^OaWxfNzkf&;?@_V)K3wNMLc7j~l!D7A z8nRV%7JsV19L~Rop@7{?#gcE1(av!cUsADkUr5QHi^1uB8^x%lM{sR~4p@YH zGw#79404xJ z9LDWCisEo8=I23ayqbzmj&8sZ-=(589cW&E^y);P15je4n6rVd^ccW$cOlr`0%Nm+ z6ss}*KL)Y|f0luwl!~nf9zbk7qH9qdTG*@^`?0FS-*Zb(07gRc;B}0Cb{9`^qI@*#n%dZKiL{<4Y545 z{AgKb8BPJD%x95yZHBpN@0IZLcq*+*H0-<|+M^aNR3p8ORE%YKEH~h0I)2GOh;g6Z=|`zI03SW7N1Z1`GGj z%g&EcT~(j+io}OURF^&j$uTjik3VEyo6XYG&|sv3bc9WO z@~uW`U>DkK?j;;M4Z_=1IoekSu4&SCa~%mOywUm_W=Uv-3(|3@==ry*mn&<#Sq+8r znq%1~`l{Gr7xs!{gAZ%i7rJt# z+FVeZ%@)?S{pJuU>j`Vwesgc>B`X3|4Sue5ZbkdeeI)jXOA1)%lGFgQEU3)gB0E;G(dH>XUI(_CM^Q z%raV^vn2MKsZ!h!6!$-X5XwW!qB?K3vRZ962g`n6XTkWZSZGuhnv{iRWuZk`XjQ6t z`A*fs7UEO0&IjHl?Dk-cLHEULSTg z%yAwWlXAo^wzZ2#gf05j&!*)6>gT1@OED^0x^@l|az71hpgUr1VAWYp`hKN*X8GLz zbJZu-cd~h%IZ;1f-$!4ALOUS9L!1&$|5$EvpGGWU_*pZ19n-pw<6p>Gt~bsSnt=5= z(P8-yvb~grh>Q zn4zjJky(Pug#{X48U2BTM6<1KiFRsg#%us=zuWDyL zcU)R@skw5aSc6zQr>aB>RZ5DEtFPd=6{3nr7*_|$)vHEIAD|Q+S75nQz|dB7*z0jhz$Ze!o;@hT6&#i~l8DVzZ#J>JJT1 zN(m;>oG-{)*Uo+~sT1x+YMxn8SoQyq1zZddB^d6}kLpNO6*N^OHF0rmwM$Rx1hBk+ z5Il%$?q@+?MWS5-AD_??aX*`T8}q=Y(B!Vi^0R<9!ggQ3 zMBA1s#}XD)_h(%#Dqc(A0Y^eZ9L10Qm2x>Lq5f>a3>B{_`5T0Aeq>$QSIy46yj8r0 z8y-KfT}{?3B}6t|T|nENjXEO*R*;Uzf7n8`u2Vv(73WEb!Qt=rCVn*?$P}XFrTq+Yb_WfUm%zfmq(q&JK+U z5|*IMcpoJ*-dEWd@24nv7TW{7D_81R6?-K_eGYnJbkwo?E5U&UwoKaq%WGSh^*@pF z*NRC{+wnB#x+*(;Kr1yD{d|I{EKc@K^F?8=`f)9hO{w zrD+#LG2UXGWsSkK?6KttazG8T1e>p!zcP<8*E2mZ?KDj@wJ^Ri9x%=^b~5@PoZVtW zFGB_WMSZS*sJ^ChN7AMJrFM=E>Z&C^D4Tpi@Z8C~5w zXfQNQ8|fh+dpGmENh1V5#+NRTjMJ4|L+nLYINU=3C6sdOp->X^nkno+(yAhqY&rOf)eNPtT#;?=mFQqThzUQW@!Vo2569ip+s_f(99Tv z{u%*|5~6Lqc^~_Ev=FO^^3VSI}MyMtykN`x?%l zmzobsyIvb6gFYptrTl1iPDX|iu_mWY+$hy&A$+vE_D=*e7 zO&q3}0Hs&(aBE{^;@D|OH}o_F>F?_I(A-jaWGSMsVf4?D3%U@;YW#nIUSC5@9mkIN 
zI?OR3Hb6P1%`gwK%i;O+dIl)L2BUSk@|dQ9_mP4f@tj!YJ;TBrr+Nn{h+x%ENI3Yc zXITETJ^_k(N={69qgnBvg?w1VG|}IhG#lzzvWVSp^l6C0_h~t$mSark%8LBL6bC5# ziUSp6WQC@>BRc&!B#rpn|E7Moey&$9-7~M<{wKYwID+h@l#C4GaMf&_+jBH<$-=0@=fkC2`Qi#j3MW#*}~CKZ7_ zor@hXMj*AU(1gQ~ZXM_-s#Z=36*C}5NuD1x(SSM%dU`oK^4_W>H0LmzLx>}IUs;8e zooTR=lhEjlHDU(E)$6c~^U6$c(zJ?K!Y)pgYsL=>aYXK_ASz)Cr^@?JkBG{0j)Lof zirw7E3r3ly4pTu{rM;so{$Iytby^o;Q_l#zC+y?v@hETkc|bbomG;8N?jPj$m0u0tt%xa;N$=ra*Sv<d9&`1zlsve@WW{W)OCB)Ytdr6)`5YJv*9+Bd!0gwa*w~W7BEy7~$UMyrf-LCvMNJxJs6^&ze2~({AJE7!Q6(}@Q-mPfd*HD~hWRRy zxeOB}%O1(=OG?LNmEIHdU2z#F*rXMYq-i}JvsEI{j1=tHlLrEulwMUx_B2+olEUg% zmi*bn3VaN`K1{D-s*er?zs+)cJgLP_HN9&WG+t*P2pVfrdS%f}tZLps6b{);QhFtc zd6+ra$k_X8A1ew1Pjd%bRJueZs2~VD%^vJzA1Pr{dU+7g7_a6ekvewbcb3}0g!zM3r8=N{7h(h&bW;eZ^zN$I7)N>7Z3K}C?+_x_`i1qw1x z^NJt~%6E|k2r^G&iy%v?>>{%ZGEdWsAe&#+MFwLh_07`|BO499tvvy-ebR&GX_jGU zZ{Aj)HW)hjHNre2xQlD3+>_E_?Ih6C;3~*w{w>MyD0+*rj1*&;T=sW4mOcKFG&%M# zW4VnUyH^PhBAu2*ri30vs*MxMaZS zN))k8V6BIWqO2q6(=v*5XG)hY(1Y8&lHj%qE)Z5(;-u9TS8h1s-?A$Kj>7I`6!p&@ zMp_~n{I3vR&Jon>EyeE0%nnrKZb>B__F8hFjec*Y6cbgJ#B6Nlt%RwWIn!agB|$Et znDvNK>=@h8b*Zr863PDlFf@^_#ZfY$lzMy9VafFb1%Hj#E`Atxi6pz%i;$dOU)6GC z1_x-v2-_}U0qvx07T>m%-O+MdDUAX~UU!OGjvrRw8pX4lF8!bf*SIOwJS?i+YrCUI zOK*(?R$=0Ik*uyrqwu;$fHs)04--kzudli|{(NZHSYaOK^BWT(v$H=E_v#Itf~ETnLEvdrX)Fnv{0l$cUJCv|143v~Vm z{TKRo_V@GK<~QCER9Hy~@`1~X_K&7U?^^R0{8341i)gSq%Z2?mPeNH<`VN*)8D4i#v8_AhAW16sC)(}zbhZ;3c)V_ zOG@{W<4T!`RDT^-weoH4>9!F;c~+!3?&KETY@t2YR>`iR;er5c_lDP4&r>Z&vOK#C>t>XK%_^Ub=jvy4-}K%AQ5$)X5sN ze4o19h2sCbslFNH+tkHe0!-$XP3JFD~-E{l|a5tJ+`R^u$JFmR{cBW8#UWwyDFQRU93uwZ&lZ})u^ZE0@(5jS5yh| zo$9%Lm7T35J2@$hUZ|K3Xxms>Ue-l*&YLD*q}pCq0^#1f_@XLazD8Y}T7#BdRGo+A zTU6WEidHYF{g$s!*Zx-fGxR&kDg{qUqjxB}<+;z5W+^uIb?l#V#-5ZWU!=6_mF53= zK@~4wq1x`(oac~U=%;mCzCRU@O|UF;%{f(oe0#b!!u>j4?ib{{+P9L=3S>yh@hUqzCCJ35Z4*y_9Uf85`XgBy{ z;*t>cyX=+C5!KY7ww06~CFL~Ht8K+Me`0s+@iJ(m)LM|lBEOI@cSQaHS*z+O+DDID z4Y9;a?kXr8xnm7Tf6)BFEAikAPj6{zNF5@b9LwwRq-@S#H^s!+@#2}H$SM5r^cI3_ z&G;&UjNSXrHPU@L1{4$xfYosY0+bl4uXdPIjsAnTo^CNVw+U z$>P#A3^qpoD7rg(vivy`pOjt?I1Pd7@R@fB7nk+VMJu~h{<-$&0Yf8Ek^866K$P@* zrhzar66oO!DySkZxTs)hBr12;P(kF??uigZi!)}lw>n@R30k^f{YPS+wtA|9ncThS zt{LHoHz*vrCO?oQAMn%uGKLEhGAxK`f6QyUHtqlL4RH0R7O)Sh?XR&96BI=Y<_h+e z$;Uxa`6pKcL*Jla2|wZu_40ie#SlTU=5jg|h?ojv#$MWnd24uY7tP@Rp~+3LqNOcA z`D%Z{@q#8`d#sRC+wr=upN0;411;xB_=1)W*NeQgtBDgdMf+!{J;}6g5glkI#U;rsiT;*0I1>HYtQCLx`!rSPQdTKb7r$eJVM zu)^U_?T$K+T`PsYZ<09TXOozg^L@07?jvZFYm^6*FY5juYi}N3(~9@AH=28x&JJgP}3g$%vtP# za^dI;_k*F^#2SXa|6ovJzI2p4lt6c#wFvL4EPjuYtQ|~mEkd7v5UKcwOYX|x+BG@A z-bW`_()K#yumqQ3QT84b;@dqnO0w5KS(M_vTJTgNj%D!2sTnn1j`O9Q3a#JuoyCOh z`A=G2=UVQH>mes=>>et|mESSrBJY~Qbysose&ba2u2jwDFW_{@(OUiYR2IMA%@%-C zj?+Kx%Fe?zELf!HaqQkN(40!~H_US2C|E?7QP*IK_(8Q`cYaQ-hxVwptL8UNCuzUb z-ntgt%P~F6JO{LT^uT+-7tL*uWuE&6 zX@mQxRxcpsx6fOiI?FN(8VfOy(kstfTrp({83TttRyQGe`6t}zUO3WLudZg9B}@$m zI})K+2hLfV&n#Zva46!QyY-x<_+|l9<5whZ$gc+6_&aM!z${&A7~pusol=fkQ!tB` z8V4lncbv0aC=-GO8t4t@B=q`{wRl3RpclbO3H{vJnyFckyy2{b9yB;(DU?Nuyy3Kj zrUzOX%p#=5c}d)lD7ACw8B4v*Vx-22N$g56IISwNFqoxC4Fh{Qp|R=KEX-2G>g|LY z{e%?GL|C&h%MdFAyF;O|KbKf4VHP76wOmm~uu*i66nI4oZwJd3{Un5=;sd;j_I1Gu*krvb>VivG9 zEc_}nl?}-=fB8(ac(uIF97x}57g%U!xoV-|P?l$V1I7H?a``vYJUjav!-NH9ftYEY zm*rbucg}8s>#uE^TCTrlnx|$Kh9FA$bg{X+Wtt~t7Mge(8a3NP76$XAtmcX6>_=a_ zQ`|Ib>O3cdP7n;mgYp9@yp^?HJPWhXtnW)xw_nL3?J{}x#Rm+n4^7AZNjTu>y1@F0 z&2z6B4m?GTd(S`QT1v>%FV3U{C)PwJsegiD+UHgV^Q_Ctz;9O5l8Skj);CYNhQ9H3 zHN~vTwJ?}xTzu?EykQNWbn`8=HfrwHx2)mi3%S2OJKs{g*8gDO$FAx7`qtdd)2lZh zy{2yhtPEHm5`+3 zC!TP1Y2)0^*`SYyBcoiW<4$3YUpZX2zh)m}mv1}EwzIZ@=8*JAa(8`fv&*$L*!jO* 
z;;-dTT9%uGXB&UbOnvg$#H7jNCk&mC6hCrY%EZ+1@#9ib#>6Kl;(u@89_>2xrc3qf zycPO42DxVs`R|eCKr=Du-7FV-&wCPMq*j{P?8t0uV<#jJ9sQqb{5PscqfFl{M+eW29W_}o6NU~Oo%Da&=WpEqjn;@W z@`!k&`2UTUSpz9g>tV9TCmfj+PkAny+Vk^Cv?yh4P06*Mupl zNkO4ag}|EGI+ zglE>uvUl(d3)5tM@E;rKpV@hY{m*s?lxr#{)6ZHDw^3+e(Kq5JB#%jw+jr^QrN{en zkDmB{-}l}_10;=`lr%0edH4kR{f;00oicoMGQRSEqD&l@{7>dFNfSOw!TOP|mecs* zJ$ zJ3BkI{BNu*kp5S>$ZulZTIn;gw2*9VG+8d9kNmg9{u{0_ywPX=8=D-clmGUgS?J+I z)6e@|oax)?bzNxkbUY`K=A!XbgAuC@Z`HBJ3PR&)@NL^^W=7MMdUf7*!nm6!=1bZZx!mb8X6NP{+j##ZRDbkGj;QjQVvPyi_hZaA;y;QX=AVvXX;m7bM$G zB+J7Y$^Jb}yjlU78EB!*IqFhJBqm!Dv&u#9EE@;Uc1Wv(XiGBGuOs9GC&<6eC9>H} z0UN?y5q}Q#7-y72F z_DrAYkU7J_vs;j6roZS*qD99gO<>uXGB$q9(8=+mlSd@+xa5BDCfVlSZ*=cBiT@|A zoHAB^lO=GB+uza;iq4#lWdR4T^f$TU8^0NjnHiS(se@-wkd#?Rw9lL5H$!dT=vHsi z0@dOqCkjCkDkn|i;hmiLzBxQ5d91baW}y@+|Gh=5U|jDm-P`r<&qCQUN^6n}l7qrC zr_tSCbfGQWU(TGIIn}|_WYT1H6opSrnv^^|X}tL#_ZM%Jx1I`9<3a~7imh$`HWe4> z`bmu_WP`3v^xK(J(B)uE^c0^?E&k}C!;+x$L``8#h#-Htzf;|x)0M@wYW4VUygb_f z&R_sBDgt6hC(8n53Xghb(6Y&*sfF zSrMY){;74?xcN_*&5bvj^gpq@p^+d<)@eOlXya2{$11Fr?BCE1C{?Q2f`L*VJK_Hy zjcfa>}C$cU+v7s_^X)-_Y>}~hvLtx7I?VW#@Qv1i-;Exo#XUA zE^zMXx3APo$=m5AnQW6mQT$H2OWsORo(tB`c75p}>FxdmjJsF^Pva){V7-^!4o-ls z4A!@_eTbiwb~oB9x79Vg7=7ug&R)|M7_P-X*^C#x*%OrV=VrZ~2S8_P{L2blOFp~I zK)WMz^mZ2j>S)8H1U#^GZ?>0RKEe)ac>N~{UUpn#TiUR(Kkl--q`LuN3IC(F3ng*XEM{xqSA$*Qx;K{n< z_8-}Y*&VbSYF8J&pS#++X-{hxXq#$|X-48v$wg9gWM=t~LbB!FRB%lmxy7MGJ6vNI z72!D0kK8p~aglnIy&JvKzooN_3TJR=-w?cSG|SyU$Ioa1hFL&-fw!=EcGUGP9l4#= z9<}ch>^RUrh-MD;H03VwG*H0-*_pn%pmn8s=d?%dq5`ZjtCn%hR{?%H^8Xz%yEbs_ z?;l1d2Zfj@;d4(nHEPg#t-*G9eaHTE@=uvxTk>?LG2K0#$o&*v4~nVh*dN{^H}v*1 zQPC>7uAy;a^7z!zL#N1%WBlGAc~bd#t&{C=*^*LGJx?P|I;G9Ff7b^Y2a)>#B$7I) zt~p~av+Zzi3wz3-x+rWzPaT~t)`r*)_X1(Vrb&*?yP&rnqw$RN2v5``)Kg~{{FWmg?h)ZtrCPmRkf)WtV~wn{ z3#w}kXP-bgstz|_ev+q&d_VolWY@B`V}HN-dq#NCm^tolG-_*qon6aXz|=36Vf1#R zMHjU?r^X(R-Te5=Q@+t#R$``_B%NJjcYw6vBhLoP&S?xZF*tVjBj03Cf12^EF2Zgc z217074q|xOHF327Wcb+ALkUx(xmeM3pp@~F<2qZxa+5bM*sqm!y2g5QF&t^=mAXN? 
z#yV@*cb$x|lO?R=oHt4U94!EyT z*Cy^cZgk9Y^mf?d(95B&eSv*fy9&D*c5m5k#FOebv>CWfwn@`Nx+P^wUN+RYxNd(< z;!tuwtMyUld@4yMYV%4Cr4z4YUnLg8&z-_HVvsCnu=#6Qrh*RDMsj+M;35vno$DEB zJvPE=;%m7fc~$_6JJQ!cJ${jOw0nMqJ(a$e{pr&R*^grT`x!`I8@;kmg&a)Rm|5QD zXQV9^NUzIv1jPltU583_S-^ zAO@$DmTp3hJ#Qe^6~=t)hI`SL8_1{gO$77&w;L!T1w-Y^DQ!bq%UD$kM#-z;z`neR zlxy8$z78nQ&6{$l)SEWkLRRb7ZZ%T(TXKk9R4*O?euMnrd07;+pN^`3cLc~PoA z9N4|;=BKld?dCWTBVlM7Mq5Iq{Hu4o7|03g(?!X|$`~*)hV8){Xb9 zOR-x)uiwcM-tyfYd9MrOu>*ngl>bmLz;k2?`dRLn5^pjJnQ%ffs$KM>Y9Y1uG9d0@l zIDF#J*1^vHsQoe(j|eRJ>RAmj+Y*J{h_(x-GgxeLp?@Qs2*q z@(#oIZky&oI*rZuDnof|F!a=bA?>4k(8mseUK*SE=E%%#Yyg`30i;!M&lk(q`(!;{ z4BK!TB0g^cnl@%XI#O$`Uliqdrr_Dbt8G1~U_Y30I1%XaaP+-Eq_H6o1&ECdGE$B$ zjJg5j9l=&N1B{ImQvH4m}H832% zXR8s*^IBoyJ}?}Wnq@9nG#-E0@e>MG5R8IJ6*s+Ty`F3K6>SLigBO$#XpkIx6e#sr zGn|BS4s_Dke4)~^2@f>vwnw&CMx#Xv#zw&q@ouK0jKi`I$qs&AQkKe@_nT~zGS%Oy z@eu}c8y0~=%v1}J-4$vp!rV1c;LpZ#(FvB#JA>&&lkjD&@ zRlx0t>?KX1awos~QXzbK%Jrok6yxvbXTPhyZG^vudX5;{-LsCvIEU448;})@^!kx%$nFHjcCtN)VYkhA46r=QQ9%^_Bf6# zDkRRLly(4-&Qm%5aXhESYE14ICwtl3oDn(ZXaapTHYL>axZJ?$q?w}$<-4M=y<)l{ zk$qGMn*DCsHo^}+0JqAfEm}9fBX~+JUJt)LE%8|r#>Ap4EM%(|zJHS6F_7>10YDy5 zI}g8>(mjZr{w7K{dK-k=r}Z9wsnQ*_L&vp#*PSk%!Wg=3?&0;|zVyiD0T>#JG+C1H z;3vI{zEc90>nckTA-N4r(6`r@;7eUq(SpMtVrYEd9~qacNVbHhO?;bo(^1YDbmw9h zFpQnl86%|`;>v+|%Ci_jj6uIozVf0Iv)nP}VM)>lb^M~>iOKJb+)}}TV3eU?DVP(E zy|1I}b4V($J4Sk=Rv6Hymv7F+OnKvcj+(Bgdp#YDy`8$AWITD0x3{SQ;-1r zeV0$Tjt-doqLiEr2Yeny>F-5pmsw^tfM@T^i5y{#TEkTKhMxC(i0DwCF?ZhJj;KMu z58vt5uUo&a{cTpMjBtzYMLE$ZbHv+t5%t1F8OoqBYW(_q2nNEBDwl9>o-l?pSfL`q zS^2JdER&a$pn{!IG~c*-eAc85A>Fff(KeGR@qN zI;7TWhkf}5;W{UqB|I4muo*-SRdRUn8T+yJ4t9BVQ8OSVQz;98IuGf1OfP z;RK2>%a<@EcDS!WN%_JRB}jXTgtV_v?`$XuBb|FGhvZgiZEb^W7TC-;EU6Ryi@uxg znr@n|t8?B;$}8>jPSRy^X=#u+jRa0sPn;RLxQpTc;!?G<|7u#M0?MMgh>%tnhEKx z3%AB#_32Ba&V>Zpx0)H|IM_V-oztYPJMBpCtkWjV3?=vTNJ(xEk@@;uNOLfTSQ)K? 
z42;1lWA^VM9l;Z16?_K~_z8HeW zn-@1;`2^Z49(|qh67zbdSj8G;T>v*8Bb5oyG`ve0{L3QZoT+bVNC@33MGV6sh~@`P zf^@W_6fxp10<*|D0|i|S381ZvRe4|_?I}f2zJwqPKKkP#V*HDPMP|`wP+6)jAvcTs zbEaP}A-4}NgU@gWPL$R*KyMDa46KN;a>lS|WuOI@LjuqV_FN80LIb^RZ6NUm0;UAq z+HcWAPTV>zpv7e&A>j73a^txJ6Bz5Nj8*0tflhfZkmcVM{KW{m3z2#svkGMsqf%{E z(&D88Z^b9bR|-OkwX7*xrV*S!7TzF}Epr5&vJ2c{O7CEnwR!txqvDUBVaj#^SY`}5 zm)qO zxn?Vj$4)8mu~**b?%WV}CB?YnpihY9BA4BPV#=HS4GN)CH7ik)z2!^okD62nmlqT=- zqB%K3J;?W~k9E1XqT%tfJ6M)!s|Av$HGqW5S z4a4IvR5oGx0FT_GqxF)nXcf3~d00|3u7flhHO(3&hU8740R}Wl|&>Q~DHjIZ+#S zmi%!7%7#9P451%lj2fvH7bitR?+Zq=jSEKXVi%+yUWwEVBvqp2vx8$y65O;)d^I|MI{xuA)c>O60&esk=VibE}=R7^%ANjVlPd?$+*#63KW%*IF}@p z!bxou+377KwG54kqLbTJp#AqJ!ZvKvSx;B*gEgbpaCb^YkcB=zWo}rEmvZGIx6tdm zppR>XQWZCZfu|qKIcU{yP_<_eqP0adjS8fWO)z0$Sq&7=gK2m*95EXmV8t5K8AjME zwK(NoT@9p!$F!GhXz2Ns=&^wT9%P8%{?Qtr?Dt;oWOxvb2-Tbznk2c8fxPDT$Bg1i zG!~og4{@iR5ivUHsi5b64D#jE19XFI9<$*VY%_-)Ep&9f1b3YETu!)*gd5JEo%`z_ z>ho~Zucz*&E?wt|XHip}92^zLNIV=p(*CtQ*+*ctJl^)H?Ht=^ZHYEr>xqj!12qnk zB6YL5Mj5iHxxUxTfsTz;?L;#tb@s{Cz!r*W(Jeq4po*lbizq$MZz#V-w=k(cooLam z4-LJlg?|2|vlk7H1o{D_2hhgXfTncn=Rs~QyG82zT2hV`vqfJ@ZP_gtOABDCnFyU} z*)2@p=Wm=j_JO_>8U@ai%&Bz8)DUwdv{e*x>P5Swkkiyw-SFtUWwu1!TXhSgtBhIZ zOOezy8bO{ck#{tx8#!p5F45DC9Qq=oRe%&q854ASsB8B(7N zO`-ko7KcPivBIzr?}%<&f;1COh6LF+oEd{uaoe)1Cr|#Jo&9IFWg7$kImXh^=H0yP zd(3RZi+MSxLh91}lSrtwYN3cnMGMcPEfP+_LW!ntPlfa*zw}C9dVMOSixfp&N_CaWRQ|?+o&3 zN>k4OZ=&LsDP40m&iV$~e9r^o9oLJl|H9q2qb`Xq2IrrgWA%6SpXnRve$!3RIXi82 zigSG7_?=^{!|x8CIe6LsW}jg1Z1;m*ob4?v6Nchpu2iifJdw1Nis2L=n+Iw8n#aK7 zT_UdJw&^U>niYv~K=)%JI#F8$YyF7eG7ctia2f}7D-q1&;5818TbYRNG!I*4omVCX z(|M*=?HY>lPx#)1qb|4pE7g)uu?SM}N>9_#hRpS$4YKyjzHBJ{9!; zStBL$?rclSnUdf|JwF+aZ*(XPS}jW5w3S=Lm}u@a(3CGn^V(Ay)}F4xOexrs3Jdi3 z5UgTi|$%Yjm;u+doEES=LW4eWY#| z=e3!RW$R5);v-ccW_dp^G^f~Gj{SFkFDXg=T`>F|nZeh_nk3RRv zeQ&xR?I{wMuZ+Ml76XILxvfC42C2V`Vw|wF_iQ?H9%x1yPsKtOvky>#?4??mlhYFP zP<>c{Tp@i(Ior?yV)ZhV{-+6g%HD<|#I5ZB(=v84rj)Du`Q%PV(KOqEC$z<0RpGY& zFkabKEo#~wCfZN^^f>MX7N70?y|E7@5-eIe#Xt)yw0b((cO4dwUS{D_cA1Q##hDqZ zC%^>isoET=OTCc&jD!x9_H#ejJinmWhIEuh+KF5n`NKfnwFBScJ(5=UYe^fHv7vlu zh_}?00oZD*`=MW8ho2hV(t1npn$a1b;LgA;txoE!0&~WV#?#DeP~NNSIM(|x27J3r zM4>vWG10g}lq8RT`ElPov2V1u)J~;m7xL2U zk6fmU6QJIY4$&(!&l=f6%S10tMObR1vgREfVv^dbzsvEP>ivS&&+CVRG&%;h)+DX# z*TSg<8%hmv`v8;npn6|BG_WtmaRD2<@9i4d{$`t?{R@7l0yRbO=T;^COOnM+gCuQI zChUScOsQaB+a)!bR#-xFYIPSG`h&zr6 zazpTS`W<{1MDPX->2(}ze;(AhZ$36sMhnC+okxt-j72OTV4zaWIB3Rs@OfN7;foJ{ z?2dVaGu=EtIfSNO09(~H4B!~XG9o+MQlATme-#AFl){;c7~cULA3`)sY};gfjgJyA z(T_xy^-DoAQMb~`eRzt&4JYOtCrT$rI=*cYh9{jys9LGf#mT|6_ad^kPDCiZB^;40 zx`?cUFM&`qAt`K$478U(m6GPQD7@!wqch!;g1uH zG=s6|%fJp_M%CxEMFSQxww$phWk@b2#z=N$_>>C~!sH#PhX?Do<+#fT zycU4@44o}Q!He=?3GkGLa`O{IC0CV6IWgLYUgsyiN989lXEWs|22u7ha9U?Il(Z}{ zjGixp^0&-uoaw|eDF0#0fn{5!Hq?AM_zo|JL6Gf_$w-1rl8%-z*5n89mA8gjq5lD9 z#X=6sY?6%Upk@Vv2^=it;Kv-y<6wssiT-RJ^$xIs|G@9v8oB=FI@`6r%MO=5&Uc)% zoK1SmT2?KmHBKGiDt5f1-eI}J2>Vy|xo{7Az;1}`6WecX8)^%+{owxa3yqJoS?XbP z!)69M;)8n4?4>rsoSz^I(hm-?qtzAK0Ls6vg;Db|i$wzjVBx_b_T+IL1g9Gy4F4Qkw?~E;XfIHjIUPnZ zm-Iu`LI-cffK+=A87q)%gMTE}vNlH_H=kf1U^>;abQ%-*z$}6j6HMTPi#*s5@m4bPryrePa2s_hEN=`aX7*UF? 
z2@6f1KxzA}U~Otz4kdPfwjTRC!d8jDKO1I4TWlT8ah&KaDJoiU1WO80~4lU%a z&ckak1Li`Hf3I@R(M|M{=BvL`8-C?Z-DwmS)D|K_TvHf!#RKQ^f*VdGU@y-9OOM<= z`QlIm9~?AnTnx+acx^qkW3umCDevPbL(T$@+kF9SmN{ympz3QZC4Z0NWvf8mq){+p zoF~F+Sq$d96I@Ya3brRtz+u`q%+ykvt@7e#7gTMt`JxnwC1`T5_NeSalu{g4$XG`n z)a?*T^%b{G3Tf=wXpFDpPD@=U>ZLCzjjhPIHuJGdxO5aOw%)T~GPxmhacCiaUbYAA zT7<%7shp|tb5Zn6HBhx-0#?!%BQw7+ZpaSPy>UiCwN<@R`rv$mv=lWkyffW|qY2`7 z>{djHe+QQ2bQO@3KV^-dts{9J^3Q($1=byyK24<;Z-#*pVT%Hd^FbTLeUpv+r>L~5 zYSg~X?%I0ZSK^$qmyUv6m5g?X|9rd$McXaWV=5;KB!A^)B$IDS6kZw--8XMLchliG znNeoMj%3w#bklmv(E$r!gF1~kD!MGg5?~{;^0&c z$~zFe!@;W@!~m12j{|~T9XT!scW`hc2QkcKs_%%P(TU^kL~u3-Go4Tj9&2>p5v8i+ zpyBZZ6ZLgkf)yIw62IJ8tm${98EBsqB5Vgl1)o4QG}IwNJw1XM^I&(cW0rY3u*_nt zYCAr~ea5O8d&F4kci`js4GqyF?f_?CvCCmaAK>6#4kpa!qzVzOsWYMlyKwQ$vmG2- zU~I0yEE67j>5PItcR}2^V|6f;r@(?A;ezyrxdJPHF&63VMtUVKhVhUng&i%p1mv-Rea94S+N z!Lg56v^n;__>f}C)6 z19lm{gG9%959TlW2m^i0+Vzmk$Kl)&tJ_thR$>ui{_GACPIyejIgGqJh>cSm82VFJ zV-8+X0Oq4A4KLbzNr%0Da}6}Q8EQE%%TJK6P+ zWViGAdh=0}2i|BQ!*9^cXtEk)xLT>lQM26hwC+r&|m)Y?$&$3T`= zsmaOq6VcoFkV$$E#N1Ju4`t2TyjK~@{Hj|s<59JC9O~uyDv`oOFyW;AGEFG z<+S|&A^Y3y&BwjzBP+2!v_C(D*6hv4%+hRxU{wvB`{Bnpp%oC`k11PT5dZD$y z=7-Vh14u6JNe0dd9{`KTK?JRK2}BEWoXQv*F1f#?+0&f^sN_|KYOIwW2f^0$H()SS za0vJy%D$0<3EO94w{8K{q|a|)F&+XYF0B_b_61{^j9HBt%GiWF{`d`fR31WU3#v0P zF*$^i^*)SXxmSA_d~D+s9zxvhhdE1cT>tltnh!zvZ^R8bjJWlVaORKEsoB<{-TzLr zqj^l21p+RWL)>i!RL#&M<`rhpD~><>2(ST1xlq1Hm@ObmXXYbJ*vkaAgs5;cn42C2 z7JLjPlYh*?%YAGRy=LY`Oqk1rj81diso56jC+)HPhV<+h_n<21r=(-3!O-If!b-w& z&@nXH;p0f&VkkLN`gD$h0PBh7ONw7HF5n!`QSUa_}#oBq=5V0#}nd?%!gj(<0x5|14lMB9# z;6+v$NbwIgVM*X)^w0NL)E&FODHJoop~c)DEDD=H*c7Tx0`m&j`?KrO__BPn!Bf04T_d@(PH#cm zPpyoixomzIN>+MFm(7>N5?4SFUtFP=%2lA~mlY-)6Bakf;;zH4DOG=GRN(g|azDKi z3(d`-&BQaDuRd6(qmx^3+e|#eS#TQK%X}wIUcUuf?H{hwVXGZy1aNPR-{TBb4Hj+rq->h6O^dU$pv zu1UzNQ0U!TIO()dBrP7~T(=pua9~~}YOlsIY(Z`8RHLR*9W3UHYZO>bM;v~aGy5D0 zY^$0tzJMw{Re`G0 zn_$ZfS*NFAr7N*c|Mo66d3N#iI;V3rlIU27BtTbs^{y+yLiu?e+N2Z~%GE-omoV%d z_uQRmkwF5du~ zXV(fnwf_)hj=8kb8|T;&8Rw*Tt*AraURoI?Ria11zN~UY@yDJA@3_k-ZFvKvnzC}e zC~Yn7ac}d}sfEbqwi+S(@=BBW;CEFe+R(5Yv>QxoiSyr_W^O~IasL96%;{F`r5Lv9 zaJnVD6l~YbNif4$2Cnk=3l&#l=ylmjJKKhEt%~XL+fDdn;G3(d(e@?wqwKYIi|m@( z9O{_fPT4+bN8AJ2d0{^B8#g2d4rk zvI3a3vNYxa_!P#tdUW=|sbI3ZeyV=%?iD5)UvVmwQm%toRB@^^_ST?0e5uifT&%ra zK!WdHM}oVrZi2wPvjTg4rN9cVpYkXFYn$p((NF8KA9Qt77|pr133*=K)R~_0uPUEM|4Q9+NUwyI92brTDYx zlK0C{oo%;Kor20uGUjUU(pz_sQr`NV-t>9prVz@wgPh9O@ARU39K6NBjCq)J@LY^` zm#na-{&x^R?k<;TK9p`o$qI-X#EwfnsPtggMG%dqd2()HWuW3AJ}?J4f~XWwK(%QaEy{ z#jmahF*l#QM=y1v4L@vciA`vT%%ZMWu)5H=1V{IdmwM+;#Y4++EAV1w2j+pSJgz|t zEx5?Ji7mx~6Tp2GtVE;BR z0WFepUZR047XWoqU#>c4$6F3Tex#h#k>9GVzPV2~o3OVoC~>>a8#S6{Y9Q~WJ#ILL z<1!pH7R!9MiKSjBf3Nd=Vof|C9y6TF(AXTA{qdy($$haI4T%$ySS8PyzNZE6Fnd$P zD|>jra&y4Bxk>m!sT;SLkXXCVT#=Y~Ox!p53=#|F#JqB!A$ExPN#*SiLb0qtli+Nm zs1V;&lqR5Uvs;#!G@2mz;6?_;ElW^%EcbIj_`vgx;${dG2>Y~~zobN$I~kedF>#o> zzi=<|SNRJbZ!^%vpOHy@70CD#jda7xJ-Om zQtGW1I3Wb`Gh)IX9i{$-mN5MLAg(z7iC~qg;%^8`G zOCFb@(R@X(H>u3R;hk?^tafOyNX)%nc(}X#K6l@@xdbhwUi?oemEV$ePI3Q_>2vnW zGF<&S2Q>m?NKda`l!emsmt`={POrp-=w(?L4SQ9F1-kWH(9u2w%kh_BIn0>FB#NXa zuMwj>21~ultFjPU$iW(m$nG^#iF*Vr;c+3aK5oIb&FeA<8pbT<5cXPLm-&;=A1IJ8 zmS#Ufw153k=9e3~4!v|YVm*6=SROY|K@|T{_AX83Uv4mxXx|^mw)`ekR>nD$yZcRO zp@^$cG_eEMnP~G(=pEvq{0s$PtBAaB0ei)m#V(4Zk=Kx()hMD3!p_@$8uGQ}6=<`AjHRwZ1=t|UJp&Or z`xbbGNfgV$JZ3ZAM$W=G%3#d#>Zyh_^EPrWeu)CJnMAwKAk9B+fw}TF=VP&!a?c{d zz}vukS8_?LMpG;k_Ay~Q6RhSFrJqGgA(bH1y8}Y8KkQL9qo{~u%wxhVCS;VZH07QH zRL#(%J1C;Xrh+O2KKu^w0e89RYhm)iw!#$RE@Bpyp+mXy;@a+W$gAmH@CD!Fyezg> z?(c}di1E3=VRG@wSG4Bydq}OQ97~Qpf7Jf4s9Gkd&XCKK}s5Fe|lwW>OH;^ 
z&sjrIQpCi{5Qj;hHMKI4Rb*1#@s(|9=lDwW#Z|aQO%p4_D0EUK4goBK_e;o41I-^_ z89)st0E=6XTp>9PWH$-ecgzR*X`-j&5$)>R=EY(ruC*b*T8j^_{bH+#tf;eF=nB?$hqVxqWm)kx; zY>%fLI{@Pf;#E&$o|J{!`C6JEmugwko_hY2PzQs_l%)K}gT^9J$r2MzG)vhK6td<2riHeDj`zZ&o$ZV~d4ib^xnp>=9#D zfCx+RWmNn=Y|!U)1YummQ-?hrFeW#KhkHy&Fe&S_y0%IDb2 zm^Ibpgc?jM1=3AbdGIq}l=`W@U*t=VVNG4C!WLrR(^$A3QGrkNTPe2lMj~#;Wl%4@ ze_D_2NZ~3b`!`q;Gt6#d47V}cXmA4wPgfD~{8&5?x-#}2)|~sE_QF=Ai1@n2buZkQ z&N)=t0_WfaB-c8RjIhf7V^lvJa)xP$16 zIr>BuQ5IL2D0?_^j!i1@lE$m(jN}S;rErL|B#gXTxE>!V-EORyMe{SVhL9RV+8_eTNBH{QmVJBs$ft~^#5zDG)zN0jKXHP7t1 zfj5k~qtS5jKjN#zjl@^MzC2FlMc%+nP^sV|Y}kYo-o68n1&)?-7UIF0bs&;-0Cr&B z%&VA79WUimk6bu9aa}g@8Nq?4@!dawskRhE${u(c7D&_?So3N8ZIc8;?g=!|aQ=xa z6=N|_Qcsk6W9Ftt9&{R(%WudsyCdY5&y&Z9H&mrnaY`qS)dn&|MT++#sd)8@bl#I7 z19|Q%ZHIID!iq^9j88krY!HRqfGsyr4dgA+o9N0AZmO^J7?`2x41U8)aBIfr9D2)S zF6xKoG{W$;BSxd~%!#hAx`30C93$;83ONUcA}n9giOV~~x_43Z+EBM)C zXyH!CclSOx_lRY}Y$jAP!BRVCdhpld5Qr0CRUtRL=!0hnZs(w3a;Y(Q1Y+%bhFJBg zA46!EtBmBB5b_*^Sxm509ZP}F5&w~3R?w71%va5P172{pmUf`FBS9Em4Z>a~q<*8v zu{FkW*ysv=2`u~31*|JiggINtgr+aRJeLX9#-UeCSj2?qf>5S|y~Q!weguN{B?!Y_ zA%^_I5sP4BF^`(Tgi%a5EC`eKU?rci62_XmMrHCAUc=TtGrwX&^eYf>5DN#HEW(M} zCWEk?3F|(E2F#1N0b!GT631m;1H)-7+(*L*s*1RQBh9ZL2T_kHP~Lgb=%izoZN+uu zPoV_2PJt4vYJ^^X=hI5ed8djXSYjGO9r#ZH_AkaPQ$x&yryyDtW3j`bV*cPDj%8ty z&pc0bromGYt?qObs@wF60hrh3SNc;_8j6c)qfE8aU>INp9&I=V!M)QFT%Lwt0tbUP z+J0YQuT*~EjH`QH`a8oSFgV`8Mbb|9abppzVaf)9#$Bp1Dw_oG_5fzHC?B2T1%^;1 z11MY|FTaF^iT6+Il+OfuOP~pf<&alBkmOx~#^K5X_JJDQ@KXALAw<~*01d?tVnQQ1 zD}@5(+Yc~*`<^x_4Zskld?f&ACzJBE0Qk-WKF*>FS$Pg1KLInFW2J5!S)ikTPC?6Bx8; zEnI#F=ua0OJw|sl>n^C2tw$cKadjM zTQid9AK?F)gK{LkBorCU>?%a4{U?UIJOFB??nA_w`sd?%)aYsnPSrEZ=YN2uf?0%4 zyU*A?VA2S+C34@pZ_1qqe@lJ;1ej!Qo;Vkrpl6*%u}s?q8r^%0ysKWj;tU`YikRU4 z_%V*`eKy?MUKuM3uqu0^6x(2*4KpVe`z1zc>Nt+`7q_Nx{P%<;{hL#J*N#N4JY+?9ms zjuMcqD*Nt;SMb*N(LKE~4o|jV+IHa;*2p_lW7WOYb?@BddtTDNRXaoY$-<2Xn2e0# zJq#45uOH_uc*!x&TQ+?H}kA9BjIrYHh?tafb za$9rt7OISME022iq*a2?}jk3*mXJ z5n{u86jl@$KEkF(`}>Fi_fk!+!$`c3;ze{po6Hu*-#olOeG{FTk6p?W{_voW6YmK( zQk+_&rq@jpJVo*Est)&!bfpEFSpFXGctyW+pTDT^8I@A_od;dqk1mS?mu$gW-K$~n zh*1$YhP%txY9RI%8fS2`>w2fz|1V4J^B{G^DjDbpSaWFk+*wI~;sbGi^{EeJu%#~; zlsiBJl&*gX^anV5LzgfpeZUot| zK?<{h1}K$4Va<&}x4tLXdOv5NtlHjJ=?_|vvRwcfuW=7Iye;DG0~(;zt41EV(bsXO z9`rC}7Kh1@g62o}F(~tZ1}KjN+UPpUcc0N}fev`Vyq2D1P=)^{t(ewm#Mc5H_KMN$yEjcr5*Wgi z!ve7OGv$~-o4gjKHg_#78Z~`luSA12K*{L^Rk$cM0yiMIo6&0~Ef=I`Ov07|=2N?e z-_ub{Gm!L1DnJ?9n@N^l2u%jmUpWZ0J~gWQ7Z&3>K+zvX0%1zyKFnJ_;SwHw0z;&- zKmh2PMk@LahQ~6X0gCiKYu2LS74Es{nYr-lr|7^Kpd<@|wa-#4lg0_sNkK|2{LY4G3d!65}NxIDpeZ${gL6?$Hvmi`OFCl{pEg5(6@j$+0UCt4vB9#E;ydXhcAO}XT3bY-pC}=E;>Y%nbeWi2-Yk;y!kYZat!aV2N zO&I1(+9*iwFsfiyOoQkX8l{j)Mq!XmVG^{9iC!_72~cd`@3u{qqza9+7f;YtD00O>8wMj|)nq_docsQPdCo{&CysjDvLwT1{RcgdV9^v;>I5G7Zd9Sx( zXj&JqUisdx%?jy}lJ>1D;;T;_NBj$3Xvwcch9bmmP$w(AEC;D2_I3R_q6P{3Yf0GMp&ab`D)PT-p}={daN5sTwCD%up)cpfe~6T3Wvp5ipb8Y(zB<)Z!+*Y)-_`{Wack#IttL z>YS?jx}5I%q*>OnoafCtA_!Luk~FSir?mI?P{KyVuh)=3ewoHREUd-Rlss{D=i zLC81i3MxF6ft?>>^OA6`6~D!kYYE+vokj!KWk8e%olrlG28UV^T2#z$vhB*^GLM_V z4H^3p#BBF&uzdKatDObA ztTaOS$z7|BN}V5#kb6@B_$hIyuu+*VkgEczLZyvL+e$bjQtkkU())=yW8dOVS%~^O zDKiAM5BQ<{m}+ecol+=hI|R+RifIL>&Kr~nm|HsKl_1tflmI0YL|L?xxAG;B5T#Ne zWoS8X<$*wYuLcjv??G0)zRo2Y2$le4J77Phsxd!a=(`O=R@ntSOsThq^W!#COkjyr zW(mO3WY8?EBj$kwrN|_;_2@Oj)dC*yGsCehku()1v07;k+Hf#KH!_W4e}oBC*u5H# zx|noCkb>7TNgi7&Xb*JY zbVf%B^e|8?He9`FRE_}%Q<|)2px^?Anmol3-=R4;;IB-;hGtO43V;x$$s^X`^B>@H zM>FVR2hc;5g$ycju;t;_3`iJCqs|{K7QIe6BRHo$^pWcE^R{f(>y$Bw7={OQLU79; z-b9r%2={=Kt0%J351awY$A7T;hFvc!g#i4OVgOiN0l>bF8Y)qLB0;wAWu*g10m?D} 
zXsuQN)(AlNSR}zfEzkm#aRPwNFDugoZ~*{L_I{7)cU3FcA=d=j9#)yG#EoF%4RZj4 z(hY0@%4&wGDDt_tvK~N);=$G&tbQ-W6C|;AVL2hw^sWDO4n|Snb)eqTZ(Jpl()KR* zz}2?wLdXd!#!pGD4RI^{pX-#@?7~avKd5C;Wo1S%OR(QCAek`bJjbHq_7Jv0 zhbb{PIMP)R1H_!%?{CUXr=$yNS44t#hW7$=?HIgj{BOVpy0$;Rp<;kXf_h3El$ud_ zUCwD4!55%R0gjKoiADd3L;6|t&9xPk&2rc^Sjl>f{E;XX8`f$A;t#N7GwWXs%HS-Vz`J|7OfhuQPzAF1gp zMdchN6$%pVB`UWMNx_WOb8)l5ZB$}zay3G9jL%~HZyGaQ(B=u+BM~RoyEcD;Pb8$c zTTrV7b$~M$F%?wk{O%r{pP&v$EDU5ptx>aRo?D#j5kU=h;kX5NJkY6%I${K*3u045 z!jJ|LUAGhD=xzZo67X||RrRYhzru1!3nV~_H`iQ!50!MoaxFT4JTKll9k

ONoqBn?9RSWEp3BYBgY9pS; zWh+3Z%n$=|njl_aVvRunJtTUJu}uW02tw1x&crbmrT6GY%52>v%Chb&DAIc zvGDq@_~Py-N09>Icj^J`M-OF5U*xP5z)P36@@f{$YUFIr1 z-#r`??=M}YNOtqVEa^KNxC?KLSRd?#l3{fTPQ{eiES^bh%;djifW4$zg6yqiY+xa} z1ak-!+`|mUb>iBDac#Ke@w9~^-)BodVXf#O1|`45h@|3{h_-7ig2)O-p_Bq6S2T5( z`U&7L&P(zA@@+t|r;R*=ffEkJ2Yx0|C{!QC!6_?mtX^}=;s``}ID{_9C{2htmsIpN zlN79SD6Z#C_;$TYMXP_yA8Xxi!5(O4DEbJ`cxbc%<`fH#*5;1Me&Un|8{R@eG7wh} zSt4>&3?Fd55>7rTZs)jC_0Y$Bl;ZB(`NFyLX|(msX;t0f(k&GjP64p#o9*e#x#3u{ zFpF_k)m?e@xhpcj5jlopH}hN9ZkLd!c%nmz+s5|N*u!FYNGek*$S|jI0-2*UdK+mX zKD#ngicDT$A4AAgxpJ9nU3Hq(vTqk4l+&>qWi@gcG}THP=q>puvX|1+vo@kN!em7+ zP9!TC5XG?H|59>3=UHi^U(|V%n72GNgW2186qa%%>4_POy;TR;!@sKvxEexkDQ69wdJ|v9%kS0#J_B7{r*uGKaPVkW zRT+bf_#WvvQms?kBE23;Ik=24ggEv>5nCM;W6=MxxB?_ViS}Tn*%RZ!U5pEZa>T@V zi6~JVhJ{i25x>I}eJ$pUJ%bOboIzalV#KYl7&dZAQ_$yi$|N9g@`w~Li z6;Kq{+!@WSOd-?C3c<2WQ$S%{a>JBk`P1u?rlug3+2(>vnUzW|S-DYZiv`+li@x7; z&YhWa`M;m{^;6tup65CHexBz!k9L{G(6%|{jHpjH2_4qZ7&?Oot2oCGrla^thF{0< ze>1%MrN)TiKVa~ZFlsTTbV9>OK3{87z|ld=MUCO?5}LEMPZ|CK!}EDN86i91*hF&> zn!WAXdWPT5@HZoA4t{1iSOw`&oaRQ?_G-C{>ENQn#IDr?X9sN-@YrAsqx@`WXmc3; zD)6Yy;25%N-vHlU8;CCN#R|dZDU1#F>}Z@;4m9?ybTnyiAZYsy*v@*z5u-H&O9$sv)C@V1d(7% zi{XM3<1Wi8ls8UQm{+LLLD4| zSjH)peD>QFuKN{jN~gMCoo%bkL(DGIHdFe($SmvMierjD1sl^a`o1lx?6R!2c5n(N zM6KgKJPfRD?~bm6aoC-W!xq|z-E`?I=!Bv7roRtbCm*4n?|wyXX8U9EW@ zZFawAI7IbI0m{mW%{(7`YkI>Pd2~Heq(w9)A)1G(s45n z)Nb1SJ!otjfx!lzzJjcE1}0uBW}I%UIZd^j;KZA&z~cnXR-WOYcGKGSq?n0Vg<`aI zEYvE7$-G2E9RklV?Iu>9NVf9Wwg14=LHnEWTpdbBLJpRc2<>rT;q-apPY5OS;RJ8QfC_+mE3tj zV)l%niL)(~_7^xiXv-LM2=pHOA zndo3mRi%4((}NgD=v8fauacE++No!oNg3!rl(W0kMGsMco`N}?_7qv^e6SE6a&i>y z)|_V2R_R&3t;ur5M6zeO?OF+5y}pOO+6~1fw&hv*2RvFl&Eei#Jn+WSA;Au9CQ1yx z!A&0zq5Gq&vs*(A;ZL}qn4wi)YAT(TrgK7B0`^vTRfIBS&Tbn?KB;MIr!R<5M8{sZ zEzP9QZR4#xhS_rvEcBIayo|>Xhi>D-ls+;7Nnc_@Q`u2S~)q{_&(|%wbOx z(9esJLXUb2Kz4_^48GZ{n)y{16!dSl+DSaPxZ4cKTk{1Kx3 zbvL}!qATyYOI&Mf`P$B2viGyiOlvEOtdJ=k4ku?_!o_nNhaYfDzK%Ch6u9U z|GAe;XaT~Wn(y_We|4gzqvC5u9dvdxlv=1zy8QZ?Sn|_ZWHL+r4wmWk=epoeMlDIZ z7x}CCEsC6c92x3y4q+j9?n0I^Xvx=I@Mu18z3Y7rGtei%l6W3JnfBk67ryqA0{XMI z9fZXbp9FPommAHQ3QZaP<^3GQ#H}zs$hpVnB_}~79o;tE&gOu6lu}QRKL%NH0;nyL zK`nm~>7-8wb>9V0+0WjiFMG*U`je^4@o!lVN2OMediGVt?eRVy#~2h`H0;GOuX)l1e>V(rrx>0?9qSwnxq)FO)9p+9?l zg`c^xpbkHYpIcLrMP_lKJ@%EC$X)Ss98r!6r)^G)+-MvP2}TvA*&y(x;9IrBuM)suY4d)2I2kb01+e`-d^avEdM_5j&ZO6A;40y^T~ntIwD1cAa7 zm69*-|6e^>?#@P=w&Gn`ilw$Ih=fVYBeX*ekR|fi<@fNm*X^I8NdAc97;E$=lb8>l z_AkcJmB7!R9>q@w{dqO(+bHtGofeDfy!Y1LfPlqsR+)!7w=r zLr`mvpBL}e#F8>PK&cF!Tv_msmn@?rcVk9PROz(8z2r^G3jsRx%AB2IiCPc5`SU36 z6MtjYfYl0S74Gt@!`^$mcsu112Jn{+yO6gVgB8Ma3_==X%^TlDm45IS#$ZGI+LQAa z^h0+3=_O<8RX{ujF(vd+cH#HHG)xE`lWue&g$)tGRSEKtUV|ri$seg8P~jv=ocaTj zj^#-+IXO#u!fz;Liz6^s8u9F5FIh_?%Dnb*Bw0N2CI)OHBrQl z_c%Xx&UUs5zYxAKysP7yW3{8d{Wtp-`$)TF+hdz#b6NLWpRhKwd}5hnX-oR;`KFD$ zkS#XRT25;!+{#MCbwSSqExlye@92fOakzWmb1@3Nu)5ucxEx5Jhkl08R$-*H&`$sE z##+5(2OTQk028fE!rw0Brx!|w?vSzJJT#1Kqw#}MSh4hSA$bs??B2_H5mOkQ_mGmZH%{MKqA ztc%i?=hN8Ut%;IywUzABE)lXguQs8Ur3ko_wwu8#acLK!^>!gKs=}N7UOk+N@v9S= z{MmcWH>|)Joh*N^9)`%3_jJ)5`?>twd-trHbQo#%%&;Nm$8sbXI#zu1a5GYSpk-$= z^RJqF)+HW6=$rxdyqaS=oqpK$8%IoF2r8F~AU(cp=*2SiUURJmH*-<^cj9^c?hP&9 zDD5J6W69Vv@4-0s1zQlY9f)-+D)Bq|UYdou4@ua?lQd(z-L|7}WGpruVX-mGPqgt)5 zQu~^JGjBDIFO}l+bky{F6y1ZDl#IK{ILFR>-rU!lc26uT2NPuLy zSgW*Clfm5gA=Pa5YzMEF?WXsY)Ts=)On*nQ-??Q8`tLpXjVW|qhtAm5^T5opOpDiB zdbRp0h-?SD;tl+Ko&Mw-SoV7|eq-)`6MkDRAsVkHStw%VEj)sJxfl9gCjG4yfUT3J zF&gy^qg?^QtJN^XRi7t-=xL?V?`hixc0up9^hybIOf=gtISSRx%18FK{;)a!T@y_u zUglVgyWB?fZqIE(oU*(?^C4QiXj|U76HX2M2lXsN{(Z5tS3B`N7F)cW(khY+UftPC zFLZFpfg=M#cOr>#h>l)VMrl|6X^bpBi+V=>bD9FP^xG(O$?dV2W!Oz5xxB$0t2O-w 
z5$acy35Y6Idz@ji+S5DVe1>@+nC2S2YQ?;+xsl7~`C!nHYG>`)-7?4#*D;A{6OjmN z^fV^HCly*rWk5s+PVApyT7(#kVpiak9;w-%Mt7FwS$y=f``g+3(NNV}Weh(i2J3K^ z&ZX8iDB0C1s7Z2ph1*NU)899@VJaj;+v;FFEdmG+*-2>{lsRA-*|#40K4oCPH8&VA zd?-WK7qnUarWqhL;2?MV*6Y$l&ZXXsRo7qm8`aKz*1g^RygSp~(zUN}H>f#D_UUMFBmO7_9 z2Rd88)$pnCSHtte$A%|{TO3y%?>JUDraAgJn%aM{e{6rn{=EG@dwaN@ykvXJwwz2n z^lw5R^C|OobAfrBxue-?x{3k0()6gQx2dsmQ#r0|Ql3+MN*hI%zkkFnKB~T3tORE zqesT6Qu{c?hkMz61H9(hK@jf%_8v1bj?A;=s-*vbQ68ua7VI4uFa1YW3>XzhjfJY_ zCdR=*#;Lsn<8YE3PJD5u`{ETJ$$qFqEQvnwxP|;)(#}J2%riJd{q>b+B*#JF`1FhI z;>d=N23kmim%%d61{MabXuQ}$wyp#2^81K@LNcD9Iy4YDFW=irB@OOP_G)*IMBt`~ zfe|C{d?1+uEMCsax)*F47cWxDntMliwHUNCw1-O(+wP2Hq>z#9fj%ihZ?tm)v;0Gr z!=kks4p5c{lEUsN#g+NjRpRfCXzm`z!@_YX2$BELNDGH99p7&uGY_k2+R7mpv=ZnWudUGW{ z&yqV}CM!nGfMG`(Dylm<1Tv|)hEgBV=XV{GU%H=MD_SiFJ6gN{V}-E~8%rjzR8Tk8{XWNc(J3r{=L{`QAoK5|PrHKk8UW-_2K>(BCno!_Z*)dB(egZRQ4gm-avUi$cv7oXPwi%k{iW4=zNhK zRuksl<6h*>cBi_dTtB*wxc=vQ+BMYG()pkBytB-?kfg7Cw@rSAy{YXN+X>rd+icrN zTU(oK{Spq3U$8!G?P0BN`Pp*JvfeV&GR)FS{ZBoo?pBM{2i0WNX})IOZ(d{0F%K|D z;}zwTxFDTp8e>W@sW>n0!{g(RDH%93{vv-YZ;|K7_sJb(Rl1D#m6uCXrF5xbSdIGv zav4Sj6nqI?)tn5KyxLukAvr0pqskx4Kn0(Hrgui1mcRzXwVmh!lbV*RWM)nVy6nB^ zcge5$yijB7_?j=?T+ODA>WKbcdWkh@R2)edkIwMcq)}j=3TWmNbUONCEPBqvH1?6u zv>zFV#{klOqvBAPryoUHT1pfK-u2=%^iMmPK1W6JX>=4nJxBFG?<;aq8b)bE>Eg6F zQ&ZY-8591ECDT)onZ3scdZpJ$^Xd?+H_OL3=mBcY@3BRg$K?{TP0*ZvzSG5e0f(quEzqmV352O0z5 z^vC-V!F~*Yo}}b{FxSw`9lHi z=UzbCfy1=^DM;>48)PPLo*EcSPR+WF8t#kwU%nIbE`_vHJ!y|wGZ-<>{Rp!ngIuX7 z(eHgndC3(k;$~2;9ev{A)u^WD;U9%g9CQ{tHw07X8ggZ>8iy?)`*a{lN0HO=5vaRv zM^W`X!y5ni&uy6Praf8nhS?=!c2-o+nL$0;1T+%eNV{@gc;}?=ukk%l-hrNOA zXWKE`2HUf?5w>{iUF+x8a_d6tBx^UT+w#4o((;le&oap3!R1jE46+K<@oJK4Ggq76 zHNR+n+}zjP%=D}26VofE=S}yS+MCSEC1tO&Oql|Q)s5s@`9ru6oF)6@Ht;?5g;XIe zma?T3DJtwnXoA4~!7)6eUpg0qupi$3P~%9}f;6=Cre$7C^?NE8#!I(}w0Rl4@@+@U zjM=;lK8^MUpsYoUEM(he@Gd8tHZO~Z;=cz&8=ZuG9wixD5VEX%y@fPfu?TkqJJAiM zwpi?i58&(-i{fBj%s%r-)ho;5iK`hJE^g~G)O#6;DMQdZ&qiZq4A1rA0nXg;+&F0( zo?JW=M`|~tkG-`G2~URi2LwI!;99t;dUct{Ed7NEi)62z9Sct!nXl6Hw-%uLY)8)1 zpF?yEIxrlapzA|)Xv`srJHU12JuJsvc7Q9Q6a)9z#4HbPijhqJ?aN@@G@zrId^;4AonAh86*Fe}Ub>zy0Mo%TR5ybTl+g~g9n?L2uqooud_Y_M*HQfA zWTXE&DrDHN$mVt*nw`$SZ>3^yfoFdzwqvbukHE;E{Ni9#?%Rk}-Qn|^?x+2xaL)c% zGGYhCl@7-CWnn4K3?!QF{YHZcD^=zWgl{ns-EHPe2>;**$of`FYD#WWQh5tZ9>wcH zI(aWL&>zbyws0M7x$(ACJ~ezVk(iZ(lk07AJm>H^+BhWp7xvffFW4Wp_arM<&T23v zFa>;PIcOo4>6U?(7V4ksDfLw~A9u)!aJhTMywAMC{0My2H!|HcePr5fnr#|sYHL!I zuaq~G7nFyU9(Xw8C;6znUY;orlUqsuN#~_|}@HxQut&t~4mel6L}~=i3J59w~_w7A=Y+ zz4~^Xr;x#v0 z?z?VVPbaUWZFo|88Y$eiJ`N6p*ymyXnN0SW$D;%d%CRh)k&~M23>|W!Ikkp3d!U?J zB)*v2XGg0XMXqAO(80UOuVWc&TJDuzBzu~cV+NwzkG;qGBDO+|&Aq1%d2t{*bh0lt z%Y!i496Je;D;3bW9Fv!f_y|?I_tZi!DH}Ob)%Ko>K$3m6J^13eFS-DoJJJ{O*)0-A zE(OW-4JUNy%G$*it^X%9M7&mxGgCaC7i@71)5nT?F$!AUIgi>pM`G@r#|Go>arEWI zi%_-q5os~nNuy0^!ZrxPqoc^g7)BgNe3Te{`;3Le#y$jEcMOqjps7+C-va*G*K2+% z1kMP85K7)eVB9|}KNcay;Yr?-wTsbEc<89{V$_WFE1Z|sF2?SP&YjmAFZO7|kiM0C z*LZP^j3+6QURoR{%_SY1fcrorI(}YSjOTo|2B5O-mvlN&Ow@|J%OSgH*fA!EM8&sm=^A}n5$|_}=(no2EQ^yH;vpicKCAX6m>7uk(S|&}=PaPzq;LBE|qw_0O`w+_jrrid$ zs`dduuQXAM#1b@&Y;a&pS$wLY7Y~0YzE&FtEkO2}N;2#xu;^@EX3>;mcB~#@i%7+( z{Xji264S-}bsijJvG-$;n=ZUt3)i7}6G-7*C1e&&Oi1LvV6=Bo56CI(Y!w=QWl4y_&lPJu)zG!-sUPnwlniv=Qw5 zvQ@@W!3PZ_~i(NKl?lm$C%>GrVjd&8_r`)*eLMSq^9QCw!38fswF&vm0^(!n% z3lQE#*S&M;GW6U(r6&gpjX1v(%iIIG;Qx_Sx=tXbmL%prEY-sgW1})kN72IXVVg0ND|9k=At()roNkOYF!2@a56?9&$34_~RA;MC z&<7Ti7+gM*7U_>;CZ>pm@InT})%!6u&jf;8eR-6sJwt~`LfBn9Hse`%+KSyM@bUkH ztJ3w7E6+8^<#GPa#{UB61UmjnLT3Bs`TOiE?T^}f+Z)?%+K$^c+McudY;A0k^$Tl- zb+I+unqrN#{9ri@sq<5oA(mM6AN7p-nz}%JK(qUriy-02}d*RQpb0T*}1$YC0} 
zv+i0Pw17NkcVZmVJ=-&9ccOJagb#$BanSm_ijR!*GX0VfbmOm9?eu8T*cot;oZP)0 z=iCj~yxPf1db+&+8PZ6k$a5C})zIZvJ&#E7^j@}x)R6n>rg&2_y=d?>Ss5#b?I5Qj zYq9U78OS}&A26Tdr-X0&1L8Z{j*4hkF?aNl%Ugf6%b1Lo;QV46XuH$tdM#kkXB zh+HzFNo~BA!w$JGb_4U}hcI3!B!|6ADtka(rU~&4SrJF_n`0enRC_#zlr=54Xvwco znfc9>i>SfosJAkD@?nr$yJpdfK}Kcbaz;DHIPa<_=ymT3kCqeN92GIO_Ba{>?>9EU ztQyNdUQ+lAfLXyAtdq^*OAH$%_|6-|asGh(~ zNvBr^{Tx+}IrHrsUg={T6fm*KQKXX0!ilL~E&SPTLk5-~$2G*L1DJ+fYG|rPvk+G2JjwdO#}yYB=PZw{}RFPX_AW0p!qgx3P8Tigjo2?AzD~HRD^J zXM1|hSwRqwCB@9U9VdtRNtSj3_mlK^r0zhG4n$!CvsXKT+;=3t9kuc1cUY*B`Eg$D zH10RziSygz7VSJh92aQcpK1FF^}I#%V0yr1F=>ZGjRX{WbxCnrYX}BS-7SN{NM`Tk zM$PE-@R@U`bH4L_XD6p4{G0Ii!&iqt5#B!>=A0edw`$zl7FK6k;CBinshv82icyxT zT0vtMYHkCa;MJ6isF>$41Q>Lh?f@AyJ%}rJquVjuL5kak5ZGLAO1X%!L$^mCbOS;n zw}6ly)e=8ygt+gn8H{S^^&`e#TjKiRI6BbUYw=PA`;e^Z=t4!7yI#{(HXQpMi6yR` z1r~BEu1NJCc`b$f=A|=!OrHL!dypF$pJ{c(_z?-@wl{S^@n@PQQY^PccAjq~AhU zOly3f;^$DpS1jYx)&QOV5ud@el(R#We+n4Fe+P@nDXek$U zsD%&5`d6+U$K3KdM&_}Xui;!>hQ+HDUBfTqCQ>ugX_;(z6}zbhUsCWYt^#y$!;oJC zVB%X?v>L3t=9wl%Q3d!UDJjgo+r7&DjQf7K#dX7V%Jsaf4eqoTI6H=43SSc5&GD_H z#F1{lVP9+SkIQY%Hq?6Cy3IPu8fMvTx!+<@-&C_zm-#*Oqvi%sa++?sNBKy3Mv0YA z%X8(n(&yOERBeb!Bg&g+ms>Lz^fu%9WH+gN?qw9AZ@orLW@+n^xu6#(_`2hgffBgv zl2Xz+CA-fZox)_8l+u$^q~pyQr7@FSQbG?s$X`6N#@u+1QqnjjW7J$8M9bWUOK3_- z<&???Ai6{b8iyz;oKiC8UvKG|JFO{mcaDne$C(+f8^>kGedpD#LfND-vyY?PT{)%d z%$>pXXR`jY-A0{}8j-U+bm5ee1%6&sJfwIhk_PF_37h1{IYDD)Yn0N7Q!4uNI%q_# z(pZorJ(U-53&(1jnx4@R9rd`Xi@e&No-hp}Stlb|@`;=hS%G_WM$wpY84Z-6$5#1# zfW&OXD5(P{-HPlLh%aX3yDYx;oRTrHd4RHK5HDyuPLaOt!ZSzhy;Om@TTgF0{9_-h6-Np}ir)bFuo(tPKf!P?-2x0>vTI&Qno50HK<%C;R+o8N4 zZ=9-w2=bw-Hh1kbh} z&|mK+qw!#@8(~YzT+mz(Qqnd+$?bNHQSRZCq{Fz3=V4&J%P7tCFfmVY3d?L$jw}qZ zuZ%`*Ac-bAdv`eyQ5H;lX?`1Xitmw;-gQ@7Ay+rIUt)wtoKRgbloPa#ISQ@XCJP#x zJ6oMl>3aY9dbXq{3mWjBn`T7gXXb+XJX~JV)9_RC=H)p}3^k9?!t=BTC z@q$Qm5+e1aq&_iya`qFCPJim@)GnR6Bqt-r2p;3Dz4z)d;+dZaDe3i0Q_L1qX3RcQ zrb~{nJHy;Z-3#4=-A-zv@Sn@v!_@9UiEJA2M;z%m$rY)Z1Vq8VHIb?!An+_qq$;N& zh>Zuy9sX041M{rx{vym>oJpB|$$aZI=u;F26!k}>a4(8$q!rMFUI8K>7@@(Z7d5CgEzDHRt{j5xp4wzG= z6=sK&WxA+zHSJa`rhNH=Dbsqzx(Awc^Q9!~C`q=qv^`<@!`9RCp-FVIT>pbn;)hyi*SRM9>$Y0NuL`b$7o@u+k5zhl|DzYfc+I#aGn1{hPkz4eSqZyhVc9lVmZbz89Re4M;W3dggC+wQu|*5 zksM}-su%Ku=~Oa|CuMpNQ^hbDq$oC!_Y-8puwAV!JntWoPw_*~_i-K?nbLCqRIn8v zKYZ7Ik}+)8VRH7yG#JWIr1?$8Max2No@=3+S;n^vRM7sobN$?YM#ZqOP`G{4G7ZP2a+j<0I=Z=ihDjIl8K-yq{vl8m-`IL2;*^CP;J^`dc@RDsZn0XjxJTcBOjLi@*vqM*-uHUq%l(Cu!~{D6Ju-S zPAQrd?lYSto6Ty)f5emUOKcOCdvn}`Fg2ySEZiftMUj|sWxsUV%k?-@Nqh~J_pDMd33@n>(C zEyQn5Ey@dqr}1y96Msrkt|55F8~1vf^1e=sZ%HYdZlK>n_)zVbQ;KppS+BfXo@N5s zno{(nA#|nEQ;Z$KpU~+xR`uQR3`^1MSTo0+&X8d1vEp3fnMo&#*jivQEvl%(~LUo?4X7=z(V1bRo^yOld_ugAB9?zOT~i6d|pshk>K;T!7CD@HqiKE5K(2_%i{n(&2o1Y&tCvP6_Zy0sd5gKM~-M1^9#jeKv*Hb%LRCu0528b7X)~T052BcMFPAq0Ow=)`2#-y7YlHa051^W z`2t)hzy$&<&MWf-{9FNk9xyzs@)>ZBK$tDS&k67>0e)71pAq1h0{pZ9KPA93LU6vE z7y2RR3h;CR&Jo}z1^5X8eq4a33GibA{HOpw!eD*=pDGZh2=K!KJXwGr65wnB&Jy5B z0z6TG9~9sTVx$iUg!=_}ya10A;IRTcMu0~P@O@!n%D#+__1ei)8ka7SjX$k2>xCXA z@QoB;p8#hH@CX4OF2KVCc&Gpm5#YfBJV@-E_X>o80z5!~`wMVC0q!fneFQi|fO`vY zFCAu!Ub;Z&DZo7hI8A_41vo{3y9;nP0q!cm$pYL(T=Y5%giZpSB)}a7I8lHT1h|6$ zw-?}c0^BwTW6^69{1GO%7T|aRjuYTk0^Cx7V+GhNz#ajP5#SaS4lH`n0-?D8-y^`y z1h}aHHxb~*0^CS|8wzj(0nV>4{HQ0uQ34z(z!3uM7GReEI|VpgfF1nuY$d(v_v5BN z+;uwNwaRs$tD*Bt=l`4&o!;>4;oBipPjK9F?1jc|n*FZ*u-$JTY9tnro`mU1ZFB&&a^@2d0E-m1xb+`QC00uMf%Gp#jEFhwg@l~YK$5i_^KBc8N;U)w>21o;_2FTZ$2oTR#jo&DQ+5W}sZ6Y9i}S{xnZV71t8p-_t!)ME1>o#w$eCrzkDHWcqM zWFfMo&QNP+AZI*fI9y{WH8s%lx_@izM@iCL8EJAK_0>WjVMEzrh@j-7K(>)XbVP9t2g`MI`WKS zYmm-{iP>%O@oTW0;Cly 
zhWPWIRt+6cQPPTK0|(Wb5oSX+(j`L@z9xnXVU!`v5JL&C`c-ir|qo>0KD}p`t`SwxvmbTw*AJ~ZPFi$83?fiU2vc<7^vJRem-gA^mw=K=7%Alci;66UT z+*w_$uUHw?^Tmu{Zs3S|2L#IuU-D8`5Z{ip-+4Bf#BV<9;Syh^Es5EDHjzw6Y&xb5 z{gJG7ehpf_Tk-{9uk}WLDX&KI1TZ!WW zZK*5=56qrxVJ6o?!CNur$}Kqofp$lRIGs8=AP-ks^59jeqjh#t40 z%8DkpHUZOl*IBO~xH;*tE6^P(e+_kqd&q=cfd;5-7f|R>n(@-M`=yO4Euyd{!Gt8( z6PmKVlD+?i-dDyk_PligB?YAkWANM>r_9j&*_soiJW=rvQ1oZfx5D;fA| zu=b)zZ*(kTE(_LPB)Pjg*bZ<%Ru=r>CJ&Yc>NT=uu>M>u3qZ6XwVTdb$>-%~z5EVB zaq`mkejR7Ec<8)3QF znjVf?>!+jrQlZpevW0z$7by9^lwv+dVkzXS{RnZf!KN}WKXP(X(L`~I#b#iZVG8y= zIj#=fsGN`utx@A!Pl21AsZ=k0ZbLE7P6&jrjv8wSpHj@V z69V*G<6=ZQ%4bwgsQk;Y8Nosr(oUeefs96D>n=jYM*W2BrJoC{H4-qYC;0v`B!I*H573XSz5FBCVL~Cj{tuC-w{LFxxB{H58;5Z-^Cu z0)|u+Xad#0b`~OFyB%FcLC;`zkMVUP;3^7%<8{TDJh6t@W`ip!pbgl5)YtA;p@yt! z#au%nP=s5f=hn@EuA@LBKo&g<775`|MO;TA5I*T_g_r@lSt$-_DbO( zErqHpBkPorYboeSXc?WMXJ&{Yt=OoikS9+PGr?*hpr^oh8dMKi7zyYoMm>cps5XU; z&}gTSngT6@XWXko4iJB8u~APUW6OGR-%W=K*HZ`#g^G29#0+34-)|Vog$>Ka45So~ zH;}9U98)LwI0Kz3pb7a;DdtKFfz*9Zt}p~oE#@i;w2EmXZ@PKA&Oi^SC~*3%wi^xM zQ;Ll$3ROdPiOHuHhcpyu^wr(GV(nN_I{^g+-aOfHhWK%X!8H{0E)GpR8{aad)Zjfkfs8y>B9EE z7#cFI*r=ya1+90X6s#t>ozV#efZ3Un@^2}mu>Li!j-0WAgIkhiW)6FUl~ z3!|1o#UBO6I!Z0(N(!_FX#5!q@{KK(Qf$;x*!0jWF$LtHj+O$HB*l@QR?M{&0##i2 z+Ik@esDQNMkeUK*r4n2agk~F7LtITEP(z;n;-ZbY!l6CM$Gtb$_SugzC z@V()FJnPolaocgivCi=XI}}~DR~QaOkJ?h$p{UH7Z_UJui@#WoSXPJqM~_8Hmc^nZ zsb46vx>N3cMx7(Sq7Id3s?FsA<{G(?`JnW@d8PD@`5|eExr_9GS#?*rH@ly5_j5OJ zU3b0hTI{+%?4*lqDi{YtqxM=7tdu&FGv5{kG^mw+WaAF(_WC5j_KQmfl}x&OlgX`1 za&V750*;`ACy(LeGJfdW_hD?SS9NG<@bEH}IJXALLm0W@gQdaE^I(R_J|4mhVwk*- zLYR9Qrt(AxGmv7mysbFC=m`&Cm`aM#&oBM?w$ry^7T*e6!fsL?nN$vJ96aYGv8{Ph zFFagARiMrM@RhwGE0{rkvY{CLoHv{0rfOA)C6PsvVLhR5$2A-{_5E}Lc{0C#0?c1= znk>9$tIkmH%GuTA?4hN)BDQpBAL{6&q!*5s0YC@nW8 za3he+<4nudlZ6dlWGFxv`XiZqvu;pu+;paemGB@)&(@?qdIVRmRyu`+!NB9@^f)Sz zeN9@GYs$|E+YmbcRJqo;#=4r}{If|v|CEP6g-473a=h;-z$1_O&h4%OHkHsTSSMYwzR8w1~TK%-rP5wvTFBi)F zWUKUvv{F*B{xwEe8G)|k`$DQun$%;Xi@ zYU@nY{(xkOFGkg0H5B7lQ~bJQNl#yTwN{89WTRv$?+v4NuHPtFlG2Qat5iQ1ECp6F zA7U3(w^RH9!4hBT(0D@XC><9p1xi=)*oC^K(P z=W?Y$@T8rF=1uj7q)IgZFqo=Se@2;7RcBG!fW)~-DG+;J@l!(L=2U-3qC{ga!HjBb z9fLrL(5oq7L!IUi$&+Z^Ww$>p)Ezyz`Hk`H^t8dN=TmNu%c0&gOxuZPU3yI%9kysU`_EG#Yss&b`j~AM7TUD z5I?;M6Y3cBfH;Y#zvP57b=B}6|+^>c|5<)AU- z{hCy#rw1fTdI~=pbCBi_iImth0sGH7M+U!9rsSzRQF4(|pbDgy>P&Q8pacsWHVx6M zaUp)Uif+PXN`VsGs;q10;OA1MKnj)nGwW0`mnsG5xBfgX(lHo~GNnp+dmTC#D(T_1 z!Z)pki7?I2WlDiY*tD~!Fw*Fe!EY2QNy{%A5=iqKrAmdz#*1~p#wwR81&UB26^b!n zKHxH?Ko&CoI$k#mVTPgN`{xZq1gXU#nGzBq6`zb3$09wWbD2^g2cF&Y3=yOibBR(Q z2bD2~vnxCJaG6pd2Hy)tGlb$fh8$KuenB|*nC;fFVd+wmr2L{BQC2HcOqcPTb$gt7 zPU7}9*O}pr!kZC+Gtb|SkFBkhu9n-1#d1QvU|9#d&?jUVgULfIZuuVds`Rs3AstW` zNGsIQQkEKLDl`9S%6ETe%5-lrdE8H%kGuPuUvhVEXrAW!&Yb3Y+Z^s%A|=62^*_8- zwb7A>?NL4ZxAwizo*8RzWBc3o30@G$we_*pvwmyci(%HUd7MdE9X64`F!p|3JT%{$ z=?QG|a@i&?V{7p0ekwy$HoYC(M^DkJepjS`*yUku*}wRuZ>uLW?oI1^2F@T4Q3Om6 zzgZkSJ7sGVaq0?$$YLQ5J@;}j{z)4I@1fRg^(JO;n-%&vf-}*|D z)(eKRf&7kSzqlk!^WDBDB*mD<0(x%$5jasj#t>4QBf+>IWteJU0%yZVtZYZ0`JAfn z=y`d3iJ?H#S)i;wcLMR|FhrFn#KKMrIAER##`^^2!Ab4we`58jeVm1&xzV@u{JcRk z!#40*BHCe!b^&dZpuGrqnKlI#nLrLd_cA<~Q4?9|16sAZ6p+@bzVm;ZRcWFERY{Vu z^36eXW8PX}NDKLP*2^)0(mud<^uC^6E;upk(wp=}!793+WwQ_%J)3J7g1!M1=wstq zl}R)OuF*?gK9!m}hrZXmp(OKA`nfRuMSVl7z ziqTnGFhq5iMZuQ1Pd~+2GkKY{ng1wwM00YVJ8)XY1cygi7}}nCBwSWXo0C6l;NY%ZY)-u?72K2|A7&1!j={BB$>q@Rn1LN4?f&PC(l$mObFa>~y|@8OZU zzIdYUWBYP@rfzad&qd$CsVpSfK88ao~#(MQ=Evgn$0%MqmiXX zbEy=+QL0q(v%#Vb@f)Q|d8>@(U}=6XRSHb>zRx!cOAWO-^&7=XB{PPLd*>8CmnsF8 zfylR>G~_?c&!tL%_;1C0D#XuLQ=?cZQobw3pXTRMrC@mTHgPefm#BU&R|@2y!Wcn{ z-zZcnM28d_02N$EsOk2@Yoh}sZyYZBHwymsA0Be42YF@0@bgc5jO)6XZV9s 
[GIT binary patch: base85-encoded delta data omitted]
zB)l{oiy)4hfEuhm3YRTI4M7J1XH#zX7&RMR&@S2qZ8|4>?9jV-ZO~^>+d`x0r$0-N zZUurq9>t1~8PWym`RDo2FW=2EJcuC+h0z=E_qkGUj*ee6Z z_(Ip{qg{gR1QMiEP&h)_xs$s;L%MSf1_uHP1_U2&v=1L{3=8Bxmf#Kb8O;@Z5U!A( zM)1wIL5GG9GG>CIVz#t%cu2!nDou-irFdt1TC+@BkZg2>9v+Brs@t;=uTCXJq0<*T zAp=mquJCHf=CEd8gBsMmoqMF>+&uD%+WRkP#|z!=2%NxcE_I665A6xU34B3fw1Tpk zLlX4UnIXi7@5U8;k5V`S^w*#d2kjZSA6Ebe2YRXC2b^xAn*x6za9&PVA5H7dd|9|c zDBBXk_c#abIVk7~L9a~hkKC7gg9r%UgLxYTePdiUe6zda)>VQ|!4+Th4}(6v!j%c5 zN>7duzhBT`<}KK4X7<2Icc9q3;R@&MOlUq2;Lm=v8rO6x^#;`l7{-kV1ee`#JJaI1 zmJdO|lG%i(m<%9?df&jE>^(?o`Q$UYOd~M*|ZWXc8beCljrV$@J1@pc$iM z9TQAx=3yr7nrF4P)Dzay!6V(WZ|5MC$bb_%*X_%0NB_sScm%YFg8j5R>ir!7d%nd^g3(MO-7@vnL zzS_{P2I<;A85xBug7>v3U9qh?!LSe4AjLt!>Zj*E<0<|W5KQG(9V;@N0moJaSG+c? zSf#xuL+Z|-sYvnOf5AK-e9pBMLr)(sdU`L?k}|#mRDP|Kfs2NoY^V8NQ$P(xRr1?b z^J{YwIKlwpnogpLM{a56Tv5+LajOa*yLG&q*|E;0rf6`KX^U^AeDvout^eXdcri@0 z8}e$VA+KtAiw9aufR*0vSG}q9DyF+5ckux0tdJ>>761%W-LKK@`qe5AGL0_oAMNp< zNwjoDMr5IkNZ%;^v`D;H-shgbOL~4Q=pib?SOk%;mLEPr#$v4?O9eS2NP{2~WEge= zM=2SvZ0YWAWnk)|*7nmRbT{NS830yg`=~oQ8fX20V|{8>XyddWaKwKUyiKQ#!(1{v z_k)Vhev1;@i!8Fto`ow${7qiwsxTsWjRmRsnek+XjMsi1uWHytd~^^4f}JE_v;YI^ NrAsn^8!%iM{|6KvL}CB{ diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix new file mode 100644 index 0000000000..24983a1e95 --- /dev/null +++ b/changelog.d/8386.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index a34bdf1830..ecca8b6e8f 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -89,6 +89,7 @@ BOOLEAN_COLUMNS = { "redactions": ["have_censored"], "room_stats_state": ["is_federatable"], "local_media_repository": ["safe_from_quarantine"], + "users": ["shadow_banned"], } From ac11fcbbb8ccfeb4c72b5aae9faef28469109277 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 24 Sep 2020 13:24:17 +0100 Subject: [PATCH 038/134] Add EventStreamPosition type (#8388) The idea is to remove some of the places we pass around `int`, where it can represent one of two things: 1. the position of an event in the stream; or 2. a token that partitions the stream, used as part of the stream tokens. The valid operations are then: 1. did a position happen before or after a token; 2. get all events that happened before or after a token; and 3. get all events between two tokens. (Note that we don't want to allow other operations as we want to change the tokens to be vector clocks rather than simple ints) --- changelog.d/8388.misc | 1 + synapse/handlers/federation.py | 16 ++++-- synapse/handlers/message.py | 6 +- synapse/handlers/sync.py | 10 ++-- synapse/notifier.py | 55 ++++++++++--------- synapse/replication/tcp/client.py | 12 +++- synapse/storage/databases/main/roommember.py | 14 +++-- synapse/storage/persist_events.py | 14 +++-- synapse/storage/roommember.py | 2 +- synapse/types.py | 15 +++++ .../replication/slave/storage/test_events.py | 12 +++- 11 files changed, 100 insertions(+), 57 deletions(-) create mode 100644 changelog.d/8388.misc diff --git a/changelog.d/8388.misc b/changelog.d/8388.misc new file mode 100644 index 0000000000..aaaef88b66 --- /dev/null +++ b/changelog.d/8388.misc @@ -0,0 +1 @@ +Add `EventStreamPosition` type. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ea9264e751..9f773aefa7 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -74,6 +74,8 @@ from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( JsonDict, MutableStateMap, + PersistedEventPosition, + RoomStreamToken, StateMap, UserID, get_domain_from_id, @@ -2956,7 +2958,7 @@ class FederationHandler(BaseHandler): ) return result["max_stream_id"] else: - max_stream_id = await self.storage.persistence.persist_events( + max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) @@ -2967,12 +2969,12 @@ class FederationHandler(BaseHandler): if not backfilled: # Never notify for backfilled events for event, _ in event_and_contexts: - await self._notify_persisted_event(event, max_stream_id) + await self._notify_persisted_event(event, max_stream_token) - return max_stream_id + return max_stream_token.stream async def _notify_persisted_event( - self, event: EventBase, max_stream_id: int + self, event: EventBase, max_stream_token: RoomStreamToken ) -> None: """Checks to see if notifier/pushers should be notified about the event or not. @@ -2998,9 +3000,11 @@ class FederationHandler(BaseHandler): elif event.internal_metadata.is_outlier(): return - event_stream_id = event.internal_metadata.stream_ordering + event_pos = PersistedEventPosition( + self._instance_name, event.internal_metadata.stream_ordering + ) self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, extra_users=extra_users + event, event_pos, max_stream_token, extra_users=extra_users ) async def _clean_room_for_join(self, room_id: str) -> None: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6ee559fd1d..ee271e85e5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1138,7 +1138,7 @@ class EventCreationHandler: if prev_state_ids: raise AuthError(403, "Changing the room create event is forbidden") - event_stream_id, max_stream_id = await self.storage.persistence.persist_event( + event_pos, max_stream_token = await self.storage.persistence.persist_event( event, context=context ) @@ -1149,7 +1149,7 @@ class EventCreationHandler: def _notify(): try: self.notifier.on_new_room_event( - event, event_stream_id, max_stream_id, extra_users=extra_users + event, event_pos, max_stream_token, extra_users=extra_users ) except Exception: logger.exception("Error notifying about new room event") @@ -1161,7 +1161,7 @@ class EventCreationHandler: # matters as sometimes presence code can take a while. 
run_in_background(self._bump_active_time, requester.user) - return event_stream_id + return event_pos.stream async def _bump_active_time(self, user: UserID) -> None: try: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9b3a4f638b..e948efef2e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -967,7 +967,7 @@ class SyncHandler: raise NotImplementedError() else: joined_room_ids = await self.get_rooms_for_user_at( - user_id, now_token.room_stream_id + user_id, now_token.room_key ) sync_result_builder = SyncResultBuilder( sync_config, @@ -1916,7 +1916,7 @@ class SyncHandler: raise Exception("Unrecognized rtype: %r", room_builder.rtype) async def get_rooms_for_user_at( - self, user_id: str, stream_ordering: int + self, user_id: str, room_key: RoomStreamToken ) -> FrozenSet[str]: """Get set of joined rooms for a user at the given stream ordering. @@ -1942,15 +1942,15 @@ class SyncHandler: # If the membership's stream ordering is after the given stream # ordering, we need to go and work out if the user was in the room # before. - for room_id, membership_stream_ordering in joined_rooms: - if membership_stream_ordering <= stream_ordering: + for room_id, event_pos in joined_rooms: + if not event_pos.persisted_after(room_key): joined_room_ids.add(room_id) continue logger.info("User joined room after current token: %s", room_id) extrems = await self.store.get_forward_extremeties_for_room( - room_id, stream_ordering + room_id, event_pos.stream ) users_in_room = await self.state.get_current_users_in_room(room_id, extrems) if user_id in users_in_room: diff --git a/synapse/notifier.py b/synapse/notifier.py index a8fd3ef886..441b3d15e2 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -42,7 +42,13 @@ from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.streams.config import PaginationConfig -from synapse.types import Collection, RoomStreamToken, StreamToken, UserID +from synapse.types import ( + Collection, + PersistedEventPosition, + RoomStreamToken, + StreamToken, + UserID, +) from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client @@ -187,7 +193,7 @@ class Notifier: self.store = hs.get_datastore() self.pending_new_room_events = ( [] - ) # type: List[Tuple[int, EventBase, Collection[UserID]]] + ) # type: List[Tuple[PersistedEventPosition, EventBase, Collection[UserID]]] # Called when there are new things to stream over replication self.replication_callbacks = [] # type: List[Callable[[], None]] @@ -246,8 +252,8 @@ class Notifier: def on_new_room_event( self, event: EventBase, - room_stream_id: int, - max_room_stream_id: int, + event_pos: PersistedEventPosition, + max_room_stream_token: RoomStreamToken, extra_users: Collection[UserID] = [], ): """ Used by handlers to inform the notifier something has happened @@ -261,16 +267,16 @@ class Notifier: until all previous events have been persisted before notifying the client streams. 
""" - self.pending_new_room_events.append((room_stream_id, event, extra_users)) - self._notify_pending_new_room_events(max_room_stream_id) + self.pending_new_room_events.append((event_pos, event, extra_users)) + self._notify_pending_new_room_events(max_room_stream_token) self.notify_replication() - def _notify_pending_new_room_events(self, max_room_stream_id: int): + def _notify_pending_new_room_events(self, max_room_stream_token: RoomStreamToken): """Notify for the room events that were queued waiting for a previous event to be persisted. Args: - max_room_stream_id: The highest stream_id below which all + max_room_stream_token: The highest stream_id below which all events have been persisted. """ pending = self.pending_new_room_events @@ -279,11 +285,9 @@ class Notifier: users = set() # type: Set[UserID] rooms = set() # type: Set[str] - for room_stream_id, event, extra_users in pending: - if room_stream_id > max_room_stream_id: - self.pending_new_room_events.append( - (room_stream_id, event, extra_users) - ) + for event_pos, event, extra_users in pending: + if event_pos.persisted_after(max_room_stream_token): + self.pending_new_room_events.append((event_pos, event, extra_users)) else: if ( event.type == EventTypes.Member @@ -296,39 +300,38 @@ class Notifier: if users or rooms: self.on_new_event( - "room_key", - RoomStreamToken(None, max_room_stream_id), - users=users, - rooms=rooms, + "room_key", max_room_stream_token, users=users, rooms=rooms, ) - self._on_updated_room_token(max_room_stream_id) + self._on_updated_room_token(max_room_stream_token) - def _on_updated_room_token(self, max_room_stream_id: int): + def _on_updated_room_token(self, max_room_stream_token: RoomStreamToken): """Poke services that might care that the room position has been updated. """ # poke any interested application service. 
run_as_background_process( - "_notify_app_services", self._notify_app_services, max_room_stream_id + "_notify_app_services", self._notify_app_services, max_room_stream_token ) run_as_background_process( - "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_id + "_notify_pusher_pool", self._notify_pusher_pool, max_room_stream_token ) if self.federation_sender: - self.federation_sender.notify_new_events(max_room_stream_id) + self.federation_sender.notify_new_events(max_room_stream_token.stream) - async def _notify_app_services(self, max_room_stream_id: int): + async def _notify_app_services(self, max_room_stream_token: RoomStreamToken): try: - await self.appservice_handler.notify_interested_services(max_room_stream_id) + await self.appservice_handler.notify_interested_services( + max_room_stream_token.stream + ) except Exception: logger.exception("Error notifying application services of event") - async def _notify_pusher_pool(self, max_room_stream_id: int): + async def _notify_pusher_pool(self, max_room_stream_token: RoomStreamToken): try: - await self._pusher_pool.on_new_notifications(max_room_stream_id) + await self._pusher_pool.on_new_notifications(max_room_stream_token.stream) except Exception: logger.exception("Error pusher pool of event") diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index e82b9e386f..55af3d41ea 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -29,7 +29,7 @@ from synapse.replication.tcp.streams.events import ( EventsStreamEventRow, EventsStreamRow, ) -from synapse.types import UserID +from synapse.types import PersistedEventPosition, RoomStreamToken, UserID from synapse.util.async_helpers import timeout_deferred from synapse.util.metrics import Measure @@ -151,8 +151,14 @@ class ReplicationDataHandler: extra_users = () # type: Tuple[UserID, ...] if event.type == EventTypes.Member: extra_users = (UserID.from_string(event.state_key),) - max_token = self.store.get_room_max_stream_ordering() - self.notifier.on_new_room_event(event, token, max_token, extra_users) + + max_token = RoomStreamToken( + None, self.store.get_room_max_stream_ordering() + ) + event_pos = PersistedEventPosition(instance_name, token) + self.notifier.on_new_room_event( + event, event_pos, max_token, extra_users + ) # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4fa8767b01..86ffe2479e 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set @@ -37,7 +36,7 @@ from synapse.storage.roommember import ( ProfileInfo, RoomsForUser, ) -from synapse.types import Collection, get_domain_from_id +from synapse.types import Collection, PersistedEventPosition, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -387,7 +386,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): # for rooms the server is participating in. 
if self._current_state_events_membership_up_to_date: sql = """ - SELECT room_id, e.stream_ordering + SELECT room_id, e.instance_name, e.stream_ordering FROM current_state_events AS c INNER JOIN events AS e USING (room_id, event_id) WHERE @@ -397,7 +396,7 @@ class RoomMemberWorkerStore(EventsWorkerStore): """ else: sql = """ - SELECT room_id, e.stream_ordering + SELECT room_id, e.instance_name, e.stream_ordering FROM current_state_events AS c INNER JOIN room_memberships AS m USING (room_id, event_id) INNER JOIN events AS e USING (room_id, event_id) @@ -408,7 +407,12 @@ class RoomMemberWorkerStore(EventsWorkerStore): """ txn.execute(sql, (user_id, Membership.JOIN)) - return frozenset(GetRoomsForUserWithStreamOrdering(*row) for row in txn) + return frozenset( + GetRoomsForUserWithStreamOrdering( + room_id, PersistedEventPosition(instance, stream_id) + ) + for room_id, instance, stream_id in txn + ) async def get_users_server_still_shares_room_with( self, user_ids: Collection[str] diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index d89f6ed128..603cd7d825 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -31,7 +31,7 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yielda from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases import Databases from synapse.storage.databases.main.events import DeltaState -from synapse.types import Collection, StateMap +from synapse.types import Collection, PersistedEventPosition, RoomStreamToken, StateMap from synapse.util.async_helpers import ObservableDeferred from synapse.util.metrics import Measure @@ -190,6 +190,7 @@ class EventsPersistenceStorage: self.persist_events_store = stores.persist_events self._clock = hs.get_clock() + self._instance_name = hs.get_instance_name() self.is_mine_id = hs.is_mine_id self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -198,7 +199,7 @@ class EventsPersistenceStorage: self, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool = False, - ) -> int: + ) -> RoomStreamToken: """ Write events to the database Args: @@ -228,11 +229,11 @@ class EventsPersistenceStorage: defer.gatherResults(deferreds, consumeErrors=True) ) - return self.main_store.get_current_events_token() + return RoomStreamToken(None, self.main_store.get_current_events_token()) async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False - ) -> Tuple[int, int]: + ) -> Tuple[PersistedEventPosition, RoomStreamToken]: """ Returns: The stream ordering of `event`, and the stream ordering of the @@ -247,7 +248,10 @@ class EventsPersistenceStorage: await make_deferred_yieldable(deferred) max_persisted_id = self.main_store.get_current_events_token() - return (event.internal_metadata.stream_ordering, max_persisted_id) + event_stream_id = event.internal_metadata.stream_ordering + + pos = PersistedEventPosition(self._instance_name, event_stream_id) + return pos, RoomStreamToken(None, max_persisted_id) def _maybe_start_persisting(self, room_id: str): async def persisting_queue(item): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 8c4a83a840..f152f63321 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -25,7 +25,7 @@ RoomsForUser = namedtuple( ) GetRoomsForUserWithStreamOrdering = namedtuple( - 
"_GetRoomsForUserWithStreamOrdering", ("room_id", "stream_ordering") + "_GetRoomsForUserWithStreamOrdering", ("room_id", "event_pos") ) diff --git a/synapse/types.py b/synapse/types.py index a6fc7df22c..ec39f9e1e8 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -495,6 +495,21 @@ class StreamToken: StreamToken.START = StreamToken.from_string("s0_0") +@attr.s(slots=True, frozen=True) +class PersistedEventPosition: + """Position of a newly persisted event with instance that persisted it. + + This can be used to test whether the event is persisted before or after a + RoomStreamToken. + """ + + instance_name = attr.ib(type=str) + stream = attr.ib(type=int) + + def persisted_after(self, token: RoomStreamToken) -> bool: + return token.stream < self.stream + + class ThirdPartyInstanceID( namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id")) ): diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index bc578411d6..c0ee1cfbd6 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -20,6 +20,7 @@ from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_ from synapse.handlers.room import RoomEventSource from synapse.replication.slave.storage.events import SlavedEventStore from synapse.storage.roommember import RoomsForUser +from synapse.types import PersistedEventPosition from tests.server import FakeTransport @@ -204,10 +205,14 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" ) self.replicate() + + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering + ) self.check( "get_rooms_for_user_with_stream_ordering", (USER_ID_2,), - {(ROOM_ID, j2.internal_metadata.stream_ordering)}, + {(ROOM_ID, expected_pos)}, ) def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): @@ -293,9 +298,10 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): # the membership change is only any use to us if the room is in the # joined_rooms list. if membership_changes: - self.assertEqual( - joined_rooms, {(ROOM_ID, j2.internal_metadata.stream_ordering)} + expected_pos = PersistedEventPosition( + "master", j2.internal_metadata.stream_ordering ) + self.assertEqual(joined_rooms, {(ROOM_ID, expected_pos)}) event_id = 0 From 6fdf5775939100121ad9e6e3a8cb21192a5444d6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 24 Sep 2020 13:43:49 +0100 Subject: [PATCH 039/134] Add new sequences to port DB script (#8387) --- changelog.d/8387.feature | 1 + scripts/synapse_port_db | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 changelog.d/8387.feature diff --git a/changelog.d/8387.feature b/changelog.d/8387.feature new file mode 100644 index 0000000000..b363e929ea --- /dev/null +++ b/changelog.d/8387.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. 
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index ecca8b6e8f..684a518b8e 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -628,6 +628,7 @@ class Porter(object): self.progress.set_state("Setting up sequence generators") await self._setup_state_group_id_seq() await self._setup_user_id_seq() + await self._setup_events_stream_seqs() self.progress.done() except Exception as e: @@ -804,6 +805,29 @@ class Porter(object): return self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) + def _setup_events_stream_seqs(self): + def r(txn): + txn.execute("SELECT MAX(stream_ordering) FROM events") + curr_id = txn.fetchone()[0] + if curr_id: + next_id = curr_id + 1 + txn.execute( + "ALTER SEQUENCE events_stream_seq RESTART WITH %s", (next_id,) + ) + + txn.execute("SELECT -MIN(stream_ordering) FROM events") + curr_id = txn.fetchone()[0] + if curr_id: + next_id = curr_id + 1 + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (next_id,), + ) + + return self.postgres_store.db_pool.runInteraction( + "_setup_events_stream_seqs", r + ) + ############################################## # The following is simply UI stuff From 11c9e17738277958f66d18015bf0e68f2c03bb8b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 24 Sep 2020 15:47:20 +0100 Subject: [PATCH 040/134] Add type annotations to SimpleHttpClient (#8372) --- changelog.d/8372.misc | 1 + synapse/appservice/api.py | 2 +- synapse/http/client.py | 187 ++++++++++++------ synapse/rest/media/v1/preview_url_resource.py | 14 +- 4 files changed, 143 insertions(+), 61 deletions(-) create mode 100644 changelog.d/8372.misc diff --git a/changelog.d/8372.misc b/changelog.d/8372.misc new file mode 100644 index 0000000000..a56e36de4b --- /dev/null +++ b/changelog.d/8372.misc @@ -0,0 +1 @@ +Add type annotations to `SimpleHttpClient`. diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 1514c0f691..c526c28b93 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -178,7 +178,7 @@ class ApplicationServiceApi(SimpleHttpClient): urllib.parse.quote(protocol), ) try: - info = await self.get_json(uri, {}) + info = await self.get_json(uri) if not _is_valid_3pe_metadata(info): logger.warning( diff --git a/synapse/http/client.py b/synapse/http/client.py index 13fcab3378..4694adc400 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -17,6 +17,18 @@ import logging import urllib from io import BytesIO +from typing import ( + Any, + BinaryIO, + Dict, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) import treq from canonicaljson import encode_canonical_json @@ -37,6 +49,7 @@ from twisted.web._newclient import ResponseDone from twisted.web.client import Agent, HTTPConnectionPool, readBody from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers +from twisted.web.iweb import IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.http import ( @@ -57,6 +70,19 @@ incoming_responses_counter = Counter( "synapse_http_client_responses", "", ["method", "code"] ) +# the type of the headers list, to be passed to the t.w.h.Headers. +# Actually we can mix str and bytes keys, but Mapping treats 'key' as invariant so +# we simplify. 
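+# (Concretely: Mapping is invariant in its key type, so a single
+# Mapping[Union[str, bytes], ...] would not accept a plain Dict[str, ...]
+# argument; hence the union of two separately-keyed Mappings below.)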
+RawHeaders = Union[Mapping[str, "RawHeaderValue"], Mapping[bytes, "RawHeaderValue"]] + +# the value actually has to be a List, but List is invariant so we can't specify that +# the entries can either be Lists or bytes. +RawHeaderValue = Sequence[Union[str, bytes]] + +# the type of the query params, to be passed into `urlencode` +QueryParamValue = Union[str, bytes, Iterable[Union[str, bytes]]] +QueryParams = Union[Mapping[str, QueryParamValue], Mapping[bytes, QueryParamValue]] + def check_against_blacklist(ip_address, ip_whitelist, ip_blacklist): """ @@ -285,13 +311,26 @@ class SimpleHttpClient: ip_blacklist=self._ip_blacklist, ) - async def request(self, method, uri, data=None, headers=None): + async def request( + self, + method: str, + uri: str, + data: Optional[bytes] = None, + headers: Optional[Headers] = None, + ) -> IResponse: """ Args: - method (str): HTTP method to use. - uri (str): URI to query. - data (bytes): Data to send in the request body, if applicable. - headers (t.w.http_headers.Headers): Request headers. + method: HTTP method to use. + uri: URI to query. + data: Data to send in the request body, if applicable. + headers: Request headers. + + Returns: + Response object, once the headers have been read. + + Raises: + RequestTimedOutError if the request times out before the headers are read + """ # A small wrapper around self.agent.request() so we can easily attach # counters to it @@ -324,6 +363,8 @@ class SimpleHttpClient: headers=headers, **self._extra_treq_args ) + # we use our own timeout mechanism rather than treq's as a workaround + # for https://twistedmatrix.com/trac/ticket/9534. request_deferred = timeout_deferred( request_deferred, 60, @@ -353,18 +394,26 @@ class SimpleHttpClient: set_tag("error_reason", e.args[0]) raise - async def post_urlencoded_get_json(self, uri, args={}, headers=None): + async def post_urlencoded_get_json( + self, + uri: str, + args: Mapping[str, Union[str, List[str]]] = {}, + headers: Optional[RawHeaders] = None, + ) -> Any: """ Args: - uri (str): - args (dict[str, str|List[str]]): query params - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: uri to query + args: parameters to be url-encoded in the body + headers: a map from header name to a list of values for that header Returns: - object: parsed json + parsed json Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException: On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -398,19 +447,24 @@ class SimpleHttpClient: response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def post_json_get_json(self, uri, post_json, headers=None): + async def post_json_get_json( + self, uri: str, post_json: Any, headers: Optional[RawHeaders] = None + ) -> Any: """ Args: - uri (str): - post_json (object): - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: URI to query. + post_json: request body, to be encoded as json + headers: a map from header name to a list of values for that header Returns: - object: parsed json + parsed json Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException: On a non-2xx HTTP response. 
ValueError: if the response was not JSON @@ -440,21 +494,22 @@ class SimpleHttpClient: response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def get_json(self, uri, args={}, headers=None): - """ Gets some json from the given URI. + async def get_json( + self, uri: str, args: QueryParams = {}, headers: Optional[RawHeaders] = None, + ) -> Any: + """Gets some json from the given URI. Args: - uri (str): The URI to request, not including query parameters - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + args: A dictionary used to create query string + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the - HTTP body as JSON. + Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -466,22 +521,27 @@ class SimpleHttpClient: body = await self.get_raw(uri, args, headers=headers) return json_decoder.decode(body.decode("utf-8")) - async def put_json(self, uri, json_body, args={}, headers=None): - """ Puts some json to the given URI. + async def put_json( + self, + uri: str, + json_body: Any, + args: QueryParams = {}, + headers: RawHeaders = None, + ) -> Any: + """Puts some json to the given URI. Args: - uri (str): The URI to request, not including query parameters - json_body (dict): The JSON to put in the HTTP body, - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + json_body: The JSON to put in the HTTP body, + args: A dictionary used to create query strings + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the - HTTP body as JSON. + Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException On a non-2xx HTTP response. ValueError: if the response was not JSON @@ -513,21 +573,23 @@ class SimpleHttpClient: response.code, response.phrase.decode("ascii", errors="replace"), body ) - async def get_raw(self, uri, args={}, headers=None): - """ Gets raw text from the given URI. + async def get_raw( + self, uri: str, args: QueryParams = {}, headers: Optional[RawHeaders] = None + ) -> bytes: + """Gets raw text from the given URI. Args: - uri (str): The URI to request, not including query parameters - args (dict): A dictionary used to create query strings, defaults to - None. - **Note**: The value of each key is assumed to be an iterable - and *not* a string. 
- headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + uri: The URI to request, not including query parameters + args: A dictionary used to create query strings + headers: a map from header name to a list of values for that header Returns: - Succeeds when we get *any* 2xx HTTP response, with the + Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes. Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + HttpResponseException on a non-2xx HTTP response. """ if len(args): @@ -552,16 +614,29 @@ class SimpleHttpClient: # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. # The two should be factored out. - async def get_file(self, url, output_stream, max_size=None, headers=None): + async def get_file( + self, + url: str, + output_stream: BinaryIO, + max_size: Optional[int] = None, + headers: Optional[RawHeaders] = None, + ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: """GETs a file from a given URL Args: - url (str): The URL to GET - output_stream (file): File to write the response body to. - headers (dict[str|bytes, List[str|bytes]]|None): If not None, a map from - header name to a list of values for that header + url: The URL to GET + output_stream: File to write the response body to. + headers: A map from header name to a list of values for that header Returns: - A (int,dict,string,int) tuple of the file length, dict of the response + A tuple of the file length, dict of the response headers, absolute URI of the response and HTTP response code. + + Raises: + RequestTimedOutException: if there is a timeout before the response headers + are received. Note there is currently no timeout on reading the response + body. + + SynapseError: if the response is not a 2xx, the remote file is too large, or + another exception happens during the download. """ actual_headers = {b"User-Agent": [self.user_agent]} diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 987765e877..dce6c4d168 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -450,7 +450,7 @@ class PreviewUrlResource(DirectServeJsonResource): logger.warning("Error downloading oEmbed metadata from %s: %r", url, e) raise OEmbedError() from e - async def _download_url(self, url, user): + async def _download_url(self, url: str, user): # TODO: we should probably honour robots.txt... except in practice # we're most likely being explicitly triggered by a human rather than a # bot, so are we really a robot? @@ -460,7 +460,7 @@ class PreviewUrlResource(DirectServeJsonResource): file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) # If this URL can be accessed via oEmbed, use that instead. - url_to_download = url + url_to_download = url # type: Optional[str] oembed_url = self._get_oembed_url(url) if oembed_url: # The result might be a new URL to download, or it might be HTML content. @@ -520,9 +520,15 @@ class PreviewUrlResource(DirectServeJsonResource): # FIXME: we should calculate a proper expiration based on the # Cache-Control and Expire headers. But for now, assume 1 hour. 
expires = ONE_HOUR - etag = headers["ETag"][0] if "ETag" in headers else None + etag = ( + headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None + ) else: - html_bytes = oembed_result.html.encode("utf-8") # type: ignore + # we can only get here if we did an oembed request and have an oembed_result.html + assert oembed_result.html is not None + assert oembed_url is not None + + html_bytes = oembed_result.html.encode("utf-8") with self.media_storage.store_into_file(file_info) as (f, fname, finish): f.write(html_bytes) await finish() From 3f4a2a7064f79e77deaed8be96668020abef3c9d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 24 Sep 2020 16:24:08 +0100 Subject: [PATCH 041/134] Hotfix: disable autoescape by default when rendering Jinja2 templates (#8394) #8037 changed the default `autoescape` option when rendering Jinja2 templates from `False` to `True`. This caused some bugs, noticeably around redirect URLs being escaped in SAML2 auth confirmation templates, causing those URLs to break for users. This change returns the previous behaviour as it stood. We may want to look at each template individually and see whether autoescaping is a good idea at some point, but for now lets just fix the breakage. --- changelog.d/8394.bugfix | 1 + synapse/config/_base.py | 10 ++++++++-- synapse/config/saml2_config.py | 4 +++- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8394.bugfix diff --git a/changelog.d/8394.bugfix b/changelog.d/8394.bugfix new file mode 100644 index 0000000000..0ac1eeca0a --- /dev/null +++ b/changelog.d/8394.bugfix @@ -0,0 +1 @@ +Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. \ No newline at end of file diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ad5ab6ad62..f8ab8e38df 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -194,7 +194,10 @@ class Config: return file_stream.read() def read_templates( - self, filenames: List[str], custom_template_directory: Optional[str] = None, + self, + filenames: List[str], + custom_template_directory: Optional[str] = None, + autoescape: bool = False, ) -> List[jinja2.Template]: """Load a list of template files from disk using the given variables. @@ -210,6 +213,9 @@ class Config: custom_template_directory: A directory to try to look for the templates before using the default Synapse template directory instead. + autoescape: Whether to autoescape variables before inserting them into the + template. + Raises: ConfigError: if the file's path is incorrect or otherwise cannot be read. 
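To make the regression concrete before the fix below: with autoescape enabled,
Jinja2 HTML-escapes every substituted variable, which mangles any URL that
contains a `&` query-string separator. A minimal sketch using stock Jinja2
only (the template string is illustrative, not one of Synapse's):

    import jinja2

    url = "https://example.com/cb?code=abc&state=xyz"
    template = "Continue: {{ redirect_url }}"

    escaped = jinja2.Environment(autoescape=True).from_string(template)
    print(escaped.render(redirect_url=url))
    # Continue: https://example.com/cb?code=abc&amp;state=xyz  <- broken link

    plain = jinja2.Environment(autoescape=False).from_string(template)
    print(plain.render(redirect_url=url))
    # Continue: https://example.com/cb?code=abc&state=xyz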
@@ -233,7 +239,7 @@ class Config:
             search_directories.insert(0, custom_template_directory)
 
         loader = jinja2.FileSystemLoader(search_directories)
-        env = jinja2.Environment(loader=loader, autoescape=True)
+        env = jinja2.Environment(loader=loader, autoescape=autoescape)
 
         # Update the environment with our custom filters
         env.filters.update(
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index cc7401888b..755478e2ff 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -169,8 +169,10 @@ class SAML2Config(Config):
             saml2_config.get("saml_session_lifetime", "15m")
         )
 
+        # We enable autoescape here as the message may potentially come from a
+        # remote resource
         self.saml2_error_html_template = self.read_templates(
-            ["saml_error.html"], saml2_config.get("template_dir")
+            ["saml_error.html"], saml2_config.get("template_dir"), autoescape=True
         )[0]
 
     def _default_saml_config_dict(

From f3e5c2e702fb2bb5c59d354b92dec3a46f4dc962 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Thu, 24 Sep 2020 08:13:55 -0400
Subject: [PATCH 042/134] Mark the shadow_banned column as boolean in synapse_port_db. (#8386)

---
 .buildkite/test_db.db   | Bin 18825216 -> 19279872 bytes
 changelog.d/8386.bugfix |   1 +
 scripts/synapse_port_db |   1 +
 3 files changed, 2 insertions(+)
 create mode 100644 changelog.d/8386.bugfix

diff --git a/.buildkite/test_db.db b/.buildkite/test_db.db
index f20567ba73e97bf2568a9577efb0f132d66c429c..361369a581771bed36692a848aa396df96ad59d9 100644
GIT binary patch
delta 168113
[168113-byte base85-encoded binary delta for .buildkite/test_db.db omitted]
z42;1lWA^VM9l;Z16?_K~_z8HeW zn-@1;`2^Z49(|qh67zbdSj8G;T>v*8Bb5oyG`ve0{L3QZoT+bVNC@33MGV6sh~@`P zf^@W_6fxp10<*|D0|i|S381ZvRe4|_?I}f2zJwqPKKkP#V*HDPMP|`wP+6)jAvcTs zbEaP}A-4}NgU@gWPL$R*KyMDa46KN;a>lS|WuOI@LjuqV_FN80LIb^RZ6NUm0;UAq z+HcWAPTV>zpv7e&A>j73a^txJ6Bz5Nj8*0tflhfZkmcVM{KW{m3z2#svkGMsqf%{E z(&D88Z^b9bR|-OkwX7*xrV*S!7TzF}Epr5&vJ2c{O7CEnwR!txqvDUBVaj#^SY`}5 zm)qO zxn?Vj$4)8mu~**b?%WV}CB?YnpihY9BA4BPV#=HS4GN)CH7ik)z2!^okD62nmlqT=- zqB%K3J;?W~k9E1XqT%tfJ6M)!s|Av$HGqW5S z4a4IvR5oGx0FT_GqxF)nXcf3~d00|3u7flhHO(3&hU8740R}Wl|&>Q~DHjIZ+#S zmi%!7%7#9P451%lj2fvH7bitR?+Zq=jSEKXVi%+yUWwEVBvqp2vx8$y65O;)d^I|MI{xuA)c>O60&esk=VibE}=R7^%ANjVlPd?$+*#63KW%*IF}@p z!bxou+377KwG54kqLbTJp#AqJ!ZvKvSx;B*gEgbpaCb^YkcB=zWo}rEmvZGIx6tdm zppR>XQWZCZfu|qKIcU{yP_<_eqP0adjS8fWO)z0$Sq&7=gK2m*95EXmV8t5K8AjME zwK(NoT@9p!$F!GhXz2Ns=&^wT9%P8%{?Qtr?Dt;oWOxvb2-Tbznk2c8fxPDT$Bg1i zG!~og4{@iR5ivUHsi5b64D#jE19XFI9<$*VY%_-)Ep&9f1b3YETu!)*gd5JEo%`z_ z>ho~Zucz*&E?wt|XHip}92^zLNIV=p(*CtQ*+*ctJl^)H?Ht=^ZHYEr>xqj!12qnk zB6YL5Mj5iHxxUxTfsTz;?L;#tb@s{Cz!r*W(Jeq4po*lbizq$MZz#V-w=k(cooLam z4-LJlg?|2|vlk7H1o{D_2hhgXfTncn=Rs~QyG82zT2hV`vqfJ@ZP_gtOABDCnFyU} z*)2@p=Wm=j_JO_>8U@ai%&Bz8)DUwdv{e*x>P5Swkkiyw-SFtUWwu1!TXhSgtBhIZ zOOezy8bO{ck#{tx8#!p5F45DC9Qq=oRe%&q854ASsB8B(7N zO`-ko7KcPivBIzr?}%<&f;1COh6LF+oEd{uaoe)1Cr|#Jo&9IFWg7$kImXh^=H0yP zd(3RZi+MSxLh91}lSrtwYN3cnMGMcPEfP+_LW!ntPlfa*zw}C9dVMOSixfp&N_CaWRQ|?+o&3 zN>k4OZ=&LsDP40m&iV$~e9r^o9oLJl|H9q2qb`Xq2IrrgWA%6SpXnRve$!3RIXi82 zigSG7_?=^{!|x8CIe6LsW}jg1Z1;m*ob4?v6Nchpu2iifJdw1Nis2L=n+Iw8n#aK7 zT_UdJw&^U>niYv~K=)%JI#F8$YyF7eG7ctia2f}7D-q1&;5818TbYRNG!I*4omVCX z(|M*=?HY>lPx#)1qb|4pE7g)uu?SM}N>9_#hRpS$4YKyjzHBJ{9!; zStBL$?rclSnUdf|JwF+aZ*(XPS}jW5w3S=Lm}u@a(3CGn^V(Ay)}F4xOexrs3Jdi3 z5UgTi|$%Yjm;u+doEES=LW4eWY#| z=e3!RW$R5);v-ccW_dp^G^f~Gj{SFkFDXg=T`>F|nZeh_nk3RRv zeQ&xR?I{wMuZ+Ml76XILxvfC42C2V`Vw|wF_iQ?H9%x1yPsKtOvky>#?4??mlhYFP zP<>c{Tp@i(Ior?yV)ZhV{-+6g%HD<|#I5ZB(=v84rj)Du`Q%PV(KOqEC$z<0RpGY& zFkabKEo#~wCfZN^^f>MX7N70?y|E7@5-eIe#Xt)yw0b((cO4dwUS{D_cA1Q##hDqZ zC%^>isoET=OTCc&jD!x9_H#ejJinmWhIEuh+KF5n`NKfnwFBScJ(5=UYe^fHv7vlu zh_}?00oZD*`=MW8ho2hV(t1npn$a1b;LgA;txoE!0&~WV#?#DeP~NNSIM(|x27J3r zM4>vWG10g}lq8RT`ElPov2V1u)J~;m7xL2U zk6fmU6QJIY4$&(!&l=f6%S10tMObR1vgREfVv^dbzsvEP>ivS&&+CVRG&%;h)+DX# z*TSg<8%hmv`v8;npn6|BG_WtmaRD2<@9i4d{$`t?{R@7l0yRbO=T;^COOnM+gCuQI zChUScOsQaB+a)!bR#-xFYIPSG`h&zr6 zazpTS`W<{1MDPX->2(}ze;(AhZ$36sMhnC+okxt-j72OTV4zaWIB3Rs@OfN7;foJ{ z?2dVaGu=EtIfSNO09(~H4B!~XG9o+MQlATme-#AFl){;c7~cULA3`)sY};gfjgJyA z(T_xy^-DoAQMb~`eRzt&4JYOtCrT$rI=*cYh9{jys9LGf#mT|6_ad^kPDCiZB^;40 zx`?cUFM&`qAt`K$478U(m6GPQD7@!wqch!;g1uH zG=s6|%fJp_M%CxEMFSQxww$phWk@b2#z=N$_>>C~!sH#PhX?Do<+#fT zycU4@44o}Q!He=?3GkGLa`O{IC0CV6IWgLYUgsyiN989lXEWs|22u7ha9U?Il(Z}{ zjGixp^0&-uoaw|eDF0#0fn{5!Hq?AM_zo|JL6Gf_$w-1rl8%-z*5n89mA8gjq5lD9 z#X=6sY?6%Upk@Vv2^=it;Kv-y<6wssiT-RJ^$xIs|G@9v8oB=FI@`6r%MO=5&Uc)% zoK1SmT2?KmHBKGiDt5f1-eI}J2>Vy|xo{7Az;1}`6WecX8)^%+{owxa3yqJoS?XbP z!)69M;)8n4?4>rsoSz^I(hm-?qtzAK0Ls6vg;Db|i$wzjVBx_b_T+IL1g9Gy4F4Qkw?~E;XfIHjIUPnZ zm-Iu`LI-cffK+=A87q)%gMTE}vNlH_H=kf1U^>;abQ%-*z$}6j6HMTPi#*s5@m4bPryrePa2s_hEN=`aX7*UF? 
z2@6f1KxzA}U~Otz4kdPfwjTRC!d8jDKO1I4TWlT8ah&KaDJoiU1WO80~4lU%a z&ckak1Li`Hf3I@R(M|M{=BvL`8-C?Z-DwmS)D|K_TvHf!#RKQ^f*VdGU@y-9OOM<= z`QlIm9~?AnTnx+acx^qkW3umCDevPbL(T$@+kF9SmN{ympz3QZC4Z0NWvf8mq){+p zoF~F+Sq$d96I@Ya3brRtz+u`q%+ykvt@7e#7gTMt`JxnwC1`T5_NeSalu{g4$XG`n z)a?*T^%b{G3Tf=wXpFDpPD@=U>ZLCzjjhPIHuJGdxO5aOw%)T~GPxmhacCiaUbYAA zT7<%7shp|tb5Zn6HBhx-0#?!%BQw7+ZpaSPy>UiCwN<@R`rv$mv=lWkyffW|qY2`7 z>{djHe+QQ2bQO@3KV^-dts{9J^3Q($1=byyK24<;Z-#*pVT%Hd^FbTLeUpv+r>L~5 zYSg~X?%I0ZSK^$qmyUv6m5g?X|9rd$McXaWV=5;KB!A^)B$IDS6kZw--8XMLchliG znNeoMj%3w#bklmv(E$r!gF1~kD!MGg5?~{;^0&c z$~zFe!@;W@!~m12j{|~T9XT!scW`hc2QkcKs_%%P(TU^kL~u3-Go4Tj9&2>p5v8i+ zpyBZZ6ZLgkf)yIw62IJ8tm${98EBsqB5Vgl1)o4QG}IwNJw1XM^I&(cW0rY3u*_nt zYCAr~ea5O8d&F4kci`js4GqyF?f_?CvCCmaAK>6#4kpa!qzVzOsWYMlyKwQ$vmG2- zU~I0yEE67j>5PItcR}2^V|6f;r@(?A;ezyrxdJPHF&63VMtUVKhVhUng&i%p1mv-Rea94S+N z!Lg56v^n;__>f}C)6 z19lm{gG9%959TlW2m^i0+Vzmk$Kl)&tJ_thR$>ui{_GACPIyejIgGqJh>cSm82VFJ zV-8+X0Oq4A4KLbzNr%0Da}6}Q8EQE%%TJK6P+ zWViGAdh=0}2i|BQ!*9^cXtEk)xLT>lQM26hwC+r&|m)Y?$&$3T`= zsmaOq6VcoFkV$$E#N1Ju4`t2TyjK~@{Hj|s<59JC9O~uyDv`oOFyW;AGEFG z<+S|&A^Y3y&BwjzBP+2!v_C(D*6hv4%+hRxU{wvB`{Bnpp%oC`k11PT5dZD$y z=7-Vh14u6JNe0dd9{`KTK?JRK2}BEWoXQv*F1f#?+0&f^sN_|KYOIwW2f^0$H()SS za0vJy%D$0<3EO94w{8K{q|a|)F&+XYF0B_b_61{^j9HBt%GiWF{`d`fR31WU3#v0P zF*$^i^*)SXxmSA_d~D+s9zxvhhdE1cT>tltnh!zvZ^R8bjJWlVaORKEsoB<{-TzLr zqj^l21p+RWL)>i!RL#&M<`rhpD~><>2(ST1xlq1Hm@ObmXXYbJ*vkaAgs5;cn42C2 z7JLjPlYh*?%YAGRy=LY`Oqk1rj81diso56jC+)HPhV<+h_n<21r=(-3!O-If!b-w& z&@nXH;p0f&VkkLN`gD$h0PBh7ONw7HF5n!`QSUa_}#oBq=5V0#}nd?%!gj(<0x5|14lMB9# z;6+v$NbwIgVM*X)^w0NL)E&FODHJoop~c)DEDD=H*c7Tx0`m&j`?KrO__BPn!Bf04T_d@(PH#cm zPpyoixomzIN>+MFm(7>N5?4SFUtFP=%2lA~mlY-)6Bakf;;zH4DOG=GRN(g|azDKi z3(d`-&BQaDuRd6(qmx^3+e|#eS#TQK%X}wIUcUuf?H{hwVXGZy1aNPR-{TBb4Hj+rq->h6O^dU$pv zu1UzNQ0U!TIO()dBrP7~T(=pua9~~}YOlsIY(Z`8RHLR*9W3UHYZO>bM;v~aGy5D0 zY^$0tzJMw{Re`G0 zn_$ZfS*NFAr7N*c|Mo66d3N#iI;V3rlIU27BtTbs^{y+yLiu?e+N2Z~%GE-omoV%d z_uQRmkwF5du~ zXV(fnwf_)hj=8kb8|T;&8Rw*Tt*AraURoI?Ria11zN~UY@yDJA@3_k-ZFvKvnzC}e zC~Yn7ac}d}sfEbqwi+S(@=BBW;CEFe+R(5Yv>QxoiSyr_W^O~IasL96%;{F`r5Lv9 zaJnVD6l~YbNif4$2Cnk=3l&#l=ylmjJKKhEt%~XL+fDdn;G3(d(e@?wqwKYIi|m@( z9O{_fPT4+bN8AJ2d0{^B8#g2d4rk zvI3a3vNYxa_!P#tdUW=|sbI3ZeyV=%?iD5)UvVmwQm%toRB@^^_ST?0e5uifT&%ra zK!WdHM}oVrZi2wPvjTg4rN9cVpYkXFYn$p((NF8KA9Qt77|pr133*=K)R~_0uPUEM|4Q9+NUwyI92brTDYx zlK0C{oo%;Kor20uGUjUU(pz_sQr`NV-t>9prVz@wgPh9O@ARU39K6NBjCq)J@LY^` zm#na-{&x^R?k<;TK9p`o$qI-X#EwfnsPtggMG%dqd2()HWuW3AJ}?J4f~XWwK(%QaEy{ z#jmahF*l#QM=y1v4L@vciA`vT%%ZMWu)5H=1V{IdmwM+;#Y4++EAV1w2j+pSJgz|t zEx5?Ji7mx~6Tp2GtVE;BR z0WFepUZR047XWoqU#>c4$6F3Tex#h#k>9GVzPV2~o3OVoC~>>a8#S6{Y9Q~WJ#ILL z<1!pH7R!9MiKSjBf3Nd=Vof|C9y6TF(AXTA{qdy($$haI4T%$ySS8PyzNZE6Fnd$P zD|>jra&y4Bxk>m!sT;SLkXXCVT#=Y~Ox!p53=#|F#JqB!A$ExPN#*SiLb0qtli+Nm zs1V;&lqR5Uvs;#!G@2mz;6?_;ElW^%EcbIj_`vgx;${dG2>Y~~zobN$I~kedF>#o> zzi=<|SNRJbZ!^%vpOHy@70CD#jda7xJ-Om zQtGW1I3Wb`Gh)IX9i{$-mN5MLAg(z7iC~qg;%^8`G zOCFb@(R@X(H>u3R;hk?^tafOyNX)%nc(}X#K6l@@xdbhwUi?oemEV$ePI3Q_>2vnW zGF<&S2Q>m?NKda`l!emsmt`={POrp-=w(?L4SQ9F1-kWH(9u2w%kh_BIn0>FB#NXa zuMwj>21~ultFjPU$iW(m$nG^#iF*Vr;c+3aK5oIb&FeA<8pbT<5cXPLm-&;=A1IJ8 zmS#Ufw153k=9e3~4!v|YVm*6=SROY|K@|T{_AX83Uv4mxXx|^mw)`ekR>nD$yZcRO zp@^$cG_eEMnP~G(=pEvq{0s$PtBAaB0ei)m#V(4Zk=Kx()hMD3!p_@$8uGQ}6=<`AjHRwZ1=t|UJp&Or z`xbbGNfgV$JZ3ZAM$W=G%3#d#>Zyh_^EPrWeu)CJnMAwKAk9B+fw}TF=VP&!a?c{d zz}vukS8_?LMpG;k_Ay~Q6RhSFrJqGgA(bH1y8}Y8KkQL9qo{~u%wxhVCS;VZH07QH zRL#(%J1C;Xrh+O2KKu^w0e89RYhm)iw!#$RE@Bpyp+mXy;@a+W$gAmH@CD!Fyezg> z?(c}di1E3=VRG@wSG4Bydq}OQ97~Qpf7Jf4s9Gkd&XCKK}s5Fe|lwW>OH;^ 
z&sjrIQpCi{5Qj;hHMKI4Rb*1#@s(|9=lDwW#Z|aQO%p4_D0EUK4goBK_e;o41I-^_ z89)st0E=6XTp>9PWH$-ecgzR*X`-j&5$)>R=EY(ruC*b*T8j^_{bH+#tf;eF=nB?$hqVxqWm)kx; zY>%fLI{@Pf;#E&$o|J{!`C6JEmugwko_hY2PzQs_l%)K}gT^9J$r2MzG)vhK6td<2riHeDj`zZ&o$ZV~d4ib^xnp>=9#D zfCx+RWmNn=Y|!U)1YummQ-?hrFeW#KhkHy&Fe&S_y0%IDb2 zm^Ibpgc?jM1=3AbdGIq}l=`W@U*t=VVNG4C!WLrR(^$A3QGrkNTPe2lMj~#;Wl%4@ ze_D_2NZ~3b`!`q;Gt6#d47V}cXmA4wPgfD~{8&5?x-#}2)|~sE_QF=Ai1@n2buZkQ z&N)=t0_WfaB-c8RjIhf7V^lvJa)xP$16 zIr>BuQ5IL2D0?_^j!i1@lE$m(jN}S;rErL|B#gXTxE>!V-EORyMe{SVhL9RV+8_eTNBH{QmVJBs$ft~^#5zDG)zN0jKXHP7t1 zfj5k~qtS5jKjN#zjl@^MzC2FlMc%+nP^sV|Y}kYo-o68n1&)?-7UIF0bs&;-0Cr&B z%&VA79WUimk6bu9aa}g@8Nq?4@!dawskRhE${u(c7D&_?So3N8ZIc8;?g=!|aQ=xa z6=N|_Qcsk6W9Ftt9&{R(%WudsyCdY5&y&Z9H&mrnaY`qS)dn&|MT++#sd)8@bl#I7 z19|Q%ZHIID!iq^9j88krY!HRqfGsyr4dgA+o9N0AZmO^J7?`2x41U8)aBIfr9D2)S zF6xKoG{W$;BSxd~%!#hAx`30C93$;83ONUcA}n9giOV~~x_43Z+EBM)C zXyH!CclSOx_lRY}Y$jAP!BRVCdhpld5Qr0CRUtRL=!0hnZs(w3a;Y(Q1Y+%bhFJBg zA46!EtBmBB5b_*^Sxm509ZP}F5&w~3R?w71%va5P172{pmUf`FBS9Em4Z>a~q<*8v zu{FkW*ysv=2`u~31*|JiggINtgr+aRJeLX9#-UeCSj2?qf>5S|y~Q!weguN{B?!Y_ zA%^_I5sP4BF^`(Tgi%a5EC`eKU?rci62_XmMrHCAUc=TtGrwX&^eYf>5DN#HEW(M} zCWEk?3F|(E2F#1N0b!GT631m;1H)-7+(*L*s*1RQBh9ZL2T_kHP~Lgb=%izoZN+uu zPoV_2PJt4vYJ^^X=hI5ed8djXSYjGO9r#ZH_AkaPQ$x&yryyDtW3j`bV*cPDj%8ty z&pc0bromGYt?qObs@wF60hrh3SNc;_8j6c)qfE8aU>INp9&I=V!M)QFT%Lwt0tbUP z+J0YQuT*~EjH`QH`a8oSFgV`8Mbb|9abppzVaf)9#$Bp1Dw_oG_5fzHC?B2T1%^;1 z11MY|FTaF^iT6+Il+OfuOP~pf<&alBkmOx~#^K5X_JJDQ@KXALAw<~*01d?tVnQQ1 zD}@5(+Yc~*`<^x_4Zskld?f&ACzJBE0Qk-WKF*>FS$Pg1KLInFW2J5!S)ikTPC?6Bx8; zEnI#F=ua0OJw|sl>n^C2tw$cKadjM zTQid9AK?F)gK{LkBorCU>?%a4{U?UIJOFB??nA_w`sd?%)aYsnPSrEZ=YN2uf?0%4 zyU*A?VA2S+C34@pZ_1qqe@lJ;1ej!Qo;Vkrpl6*%u}s?q8r^%0ysKWj;tU`YikRU4 z_%V*`eKy?MUKuM3uqu0^6x(2*4KpVe`z1zc>Nt+`7q_Nx{P%<;{hL#J*N#N4JY+?9ms zjuMcqD*Nt;SMb*N(LKE~4o|jV+IHa;*2p_lW7WOYb?@BddtTDNRXaoY$-<2Xn2e0# zJq#45uOH_uc*!x&TQ+?H}kA9BjIrYHh?tafb za$9rt7OISME022iq*a2?}jk3*mXJ z5n{u86jl@$KEkF(`}>Fi_fk!+!$`c3;ze{po6Hu*-#olOeG{FTk6p?W{_voW6YmK( zQk+_&rq@jpJVo*Est)&!bfpEFSpFXGctyW+pTDT^8I@A_od;dqk1mS?mu$gW-K$~n zh*1$YhP%txY9RI%8fS2`>w2fz|1V4J^B{G^DjDbpSaWFk+*wI~;sbGi^{EeJu%#~; zlsiBJl&*gX^anV5LzgfpeZUot| zK?<{h1}K$4Va<&}x4tLXdOv5NtlHjJ=?_|vvRwcfuW=7Iye;DG0~(;zt41EV(bsXO z9`rC}7Kh1@g62o}F(~tZ1}KjN+UPpUcc0N}fev`Vyq2D1P=)^{t(ewm#Mc5H_KMN$yEjcr5*Wgi z!ve7OGv$~-o4gjKHg_#78Z~`luSA12K*{L^Rk$cM0yiMIo6&0~Ef=I`Ov07|=2N?e z-_ub{Gm!L1DnJ?9n@N^l2u%jmUpWZ0J~gWQ7Z&3>K+zvX0%1zyKFnJ_;SwHw0z;&- zKmh2PMk@LahQ~6X0gCiKYu2LS74Es{nYr-lr|7^Kpd<@|wa-#4lg0_sNkK|2{LY4G3d!65}NxIDpeZ${gL6?$Hvmi`OFCl{pEg5(6@j$+0UCt4vB9#E;ydXhcAO}XT3bY-pC}=E;>Y%nbeWi2-Yk;y!kYZat!aV2N zO&I1(+9*iwFsfiyOoQkX8l{j)Mq!XmVG^{9iC!_72~cd`@3u{qqza9+7f;YtD00O>8wMj|)nq_docsQPdCo{&CysjDvLwT1{RcgdV9^v;>I5G7Zd9Sx( zXj&JqUisdx%?jy}lJ>1D;;T;_NBj$3Xvwcch9bmmP$w(AEC;D2_I3R_q6P{3Yf0GMp&ab`D)PT-p}={daN5sTwCD%up)cpfe~6T3Wvp5ipb8Y(zB<)Z!+*Y)-_`{Wack#IttL z>YS?jx}5I%q*>OnoafCtA_!Luk~FSir?mI?P{KyVuh)=3ewoHREUd-Rlss{D=i zLC81i3MxF6ft?>>^OA6`6~D!kYYE+vokj!KWk8e%olrlG28UV^T2#z$vhB*^GLM_V z4H^3p#BBF&uzdKatDObA ztTaOS$z7|BN}V5#kb6@B_$hIyuu+*VkgEczLZyvL+e$bjQtkkU())=yW8dOVS%~^O zDKiAM5BQ<{m}+ecol+=hI|R+RifIL>&Kr~nm|HsKl_1tflmI0YL|L?xxAG;B5T#Ne zWoS8X<$*wYuLcjv??G0)zRo2Y2$le4J77Phsxd!a=(`O=R@ntSOsThq^W!#COkjyr zW(mO3WY8?EBj$kwrN|_;_2@Oj)dC*yGsCehku()1v07;k+Hf#KH!_W4e}oBC*u5H# zx|noCkb>7TNgi7&Xb*JY zbVf%B^e|8?He9`FRE_}%Q<|)2px^?Anmol3-=R4;;IB-;hGtO43V;x$$s^X`^B>@H zM>FVR2hc;5g$ycju;t;_3`iJCqs|{K7QIe6BRHo$^pWcE^R{f(>y$Bw7={OQLU79; z-b9r%2={=Kt0%J351awY$A7T;hFvc!g#i4OVgOiN0l>bF8Y)qLB0;wAWu*g10m?D} 
zXsuQN)(AlNSR}zfEzkm#aRPwNFDugoZ~*{L_I{7)cU3FcA=d=j9#)yG#EoF%4RZj4 z(hY0@%4&wGDDt_tvK~N);=$G&tbQ-W6C|;AVL2hw^sWDO4n|Snb)eqTZ(Jpl()KR* zz}2?wLdXd!#!pGD4RI^{pX-#@?7~avKd5C;Wo1S%OR(QCAek`bJjbHq_7Jv0 zhbb{PIMP)R1H_!%?{CUXr=$yNS44t#hW7$=?HIgj{BOVpy0$;Rp<;kXf_h3El$ud_ zUCwD4!55%R0gjKoiADd3L;6|t&9xPk&2rc^Sjl>f{E;XX8`f$A;t#N7GwWXs%HS-Vz`J|7OfhuQPzAF1gp zMdchN6$%pVB`UWMNx_WOb8)l5ZB$}zay3G9jL%~HZyGaQ(B=u+BM~RoyEcD;Pb8$c zTTrV7b$~M$F%?wk{O%r{pP&v$EDU5ptx>aRo?D#j5kU=h;kX5NJkY6%I${K*3u045 z!jJ|LUAGhD=xzZo67X||RrRYhzru1!3nV~_H`iQ!50!MoaxFT4JTKll9k

ONoqBn?9RSWEp3BYBgY9pS; zWh+3Z%n$=|njl_aVvRunJtTUJu}uW02tw1x&crbmrT6GY%52>v%Chb&DAIc zvGDq@_~Py-N09>Icj^J`M-OF5U*xP5z)P36@@f{$YUFIr1 z-#r`??=M}YNOtqVEa^KNxC?KLSRd?#l3{fTPQ{eiES^bh%;djifW4$zg6yqiY+xa} z1ak-!+`|mUb>iBDac#Ke@w9~^-)BodVXf#O1|`45h@|3{h_-7ig2)O-p_Bq6S2T5( z`U&7L&P(zA@@+t|r;R*=ffEkJ2Yx0|C{!QC!6_?mtX^}=;s``}ID{_9C{2htmsIpN zlN79SD6Z#C_;$TYMXP_yA8Xxi!5(O4DEbJ`cxbc%<`fH#*5;1Me&Un|8{R@eG7wh} zSt4>&3?Fd55>7rTZs)jC_0Y$Bl;ZB(`NFyLX|(msX;t0f(k&GjP64p#o9*e#x#3u{ zFpF_k)m?e@xhpcj5jlopH}hN9ZkLd!c%nmz+s5|N*u!FYNGek*$S|jI0-2*UdK+mX zKD#ngicDT$A4AAgxpJ9nU3Hq(vTqk4l+&>qWi@gcG}THP=q>puvX|1+vo@kN!em7+ zP9!TC5XG?H|59>3=UHi^U(|V%n72GNgW2186qa%%>4_POy;TR;!@sKvxEexkDQ69wdJ|v9%kS0#J_B7{r*uGKaPVkW zRT+bf_#WvvQms?kBE23;Ik=24ggEv>5nCM;W6=MxxB?_ViS}Tn*%RZ!U5pEZa>T@V zi6~JVhJ{i25x>I}eJ$pUJ%bOboIzalV#KYl7&dZAQ_$yi$|N9g@`w~Li z6;Kq{+!@WSOd-?C3c<2WQ$S%{a>JBk`P1u?rlug3+2(>vnUzW|S-DYZiv`+li@x7; z&YhWa`M;m{^;6tup65CHexBz!k9L{G(6%|{jHpjH2_4qZ7&?Oot2oCGrla^thF{0< ze>1%MrN)TiKVa~ZFlsTTbV9>OK3{87z|ld=MUCO?5}LEMPZ|CK!}EDN86i91*hF&> zn!WAXdWPT5@HZoA4t{1iSOw`&oaRQ?_G-C{>ENQn#IDr?X9sN-@YrAsqx@`WXmc3; zD)6Yy;25%N-vHlU8;CCN#R|dZDU1#F>}Z@;4m9?ybTnyiAZYsy*v@*z5u-H&O9$sv)C@V1d(7% zi{XM3<1Wi8ls8UQm{+LLLD4| zSjH)peD>QFuKN{jN~gMCoo%bkL(DGIHdFe($SmvMierjD1sl^a`o1lx?6R!2c5n(N zM6KgKJPfRD?~bm6aoC-W!xq|z-E`?I=!Bv7roRtbCm*4n?|wyXX8U9EW@ zZFawAI7IbI0m{mW%{(7`YkI>Pd2~Heq(w9)A)1G(s45n z)Nb1SJ!otjfx!lzzJjcE1}0uBW}I%UIZd^j;KZA&z~cnXR-WOYcGKGSq?n0Vg<`aI zEYvE7$-G2E9RklV?Iu>9NVf9Wwg14=LHnEWTpdbBLJpRc2<>rT;q-apPY5OS;RJ8QfC_+mE3tj zV)l%niL)(~_7^xiXv-LM2=pHOA zndo3mRi%4((}NgD=v8fauacE++No!oNg3!rl(W0kMGsMco`N}?_7qv^e6SE6a&i>y z)|_V2R_R&3t;ur5M6zeO?OF+5y}pOO+6~1fw&hv*2RvFl&Eei#Jn+WSA;Au9CQ1yx z!A&0zq5Gq&vs*(A;ZL}qn4wi)YAT(TrgK7B0`^vTRfIBS&Tbn?KB;MIr!R<5M8{sZ zEzP9QZR4#xhS_rvEcBIayo|>Xhi>D-ls+;7Nnc_@Q`u2S~)q{_&(|%wbOx z(9esJLXUb2Kz4_^48GZ{n)y{16!dSl+DSaPxZ4cKTk{1Kx3 zbvL}!qATyYOI&Mf`P$B2viGyiOlvEOtdJ=k4ku?_!o_nNhaYfDzK%Ch6u9U z|GAe;XaT~Wn(y_We|4gzqvC5u9dvdxlv=1zy8QZ?Sn|_ZWHL+r4wmWk=epoeMlDIZ z7x}CCEsC6c92x3y4q+j9?n0I^Xvx=I@Mu18z3Y7rGtei%l6W3JnfBk67ryqA0{XMI z9fZXbp9FPommAHQ3QZaP<^3GQ#H}zs$hpVnB_}~79o;tE&gOu6lu}QRKL%NH0;nyL zK`nm~>7-8wb>9V0+0WjiFMG*U`je^4@o!lVN2OMediGVt?eRVy#~2h`H0;GOuX)l1e>V(rrx>0?9qSwnxq)FO)9p+9?l zg`c^xpbkHYpIcLrMP_lKJ@%EC$X)Ss98r!6r)^G)+-MvP2}TvA*&y(x;9IrBuM)suY4d)2I2kb01+e`-d^avEdM_5j&ZO6A;40y^T~ntIwD1cAa7 zm69*-|6e^>?#@P=w&Gn`ilw$Ih=fVYBeX*ekR|fi<@fNm*X^I8NdAc97;E$=lb8>l z_AkcJmB7!R9>q@w{dqO(+bHtGofeDfy!Y1LfPlqsR+)!7w=r zLr`mvpBL}e#F8>PK&cF!Tv_msmn@?rcVk9PROz(8z2r^G3jsRx%AB2IiCPc5`SU36 z6MtjYfYl0S74Gt@!`^$mcsu112Jn{+yO6gVgB8Ma3_==X%^TlDm45IS#$ZGI+LQAa z^h0+3=_O<8RX{ujF(vd+cH#HHG)xE`lWue&g$)tGRSEKtUV|ri$seg8P~jv=ocaTj zj^#-+IXO#u!fz;Liz6^s8u9F5FIh_?%Dnb*Bw0N2CI)OHBrQl z_c%Xx&UUs5zYxAKysP7yW3{8d{Wtp-`$)TF+hdz#b6NLWpRhKwd}5hnX-oR;`KFD$ zkS#XRT25;!+{#MCbwSSqExlye@92fOakzWmb1@3Nu)5ucxEx5Jhkl08R$-*H&`$sE z##+5(2OTQk028fE!rw0Brx!|w?vSzJJT#1Kqw#}MSh4hSA$bs??B2_H5mOkQ_mGmZH%{MKqA ztc%i?=hN8Ut%;IywUzABE)lXguQs8Ur3ko_wwu8#acLK!^>!gKs=}N7UOk+N@v9S= z{MmcWH>|)Joh*N^9)`%3_jJ)5`?>twd-trHbQo#%%&;Nm$8sbXI#zu1a5GYSpk-$= z^RJqF)+HW6=$rxdyqaS=oqpK$8%IoF2r8F~AU(cp=*2SiUURJmH*-<^cj9^c?hP&9 zDD5J6W69Vv@4-0s1zQlY9f)-+D)Bq|UYdou4@ua?lQd(z-L|7}WGpruVX-mGPqgt)5 zQu~^JGjBDIFO}l+bky{F6y1ZDl#IK{ILFR>-rU!lc26uT2NPuLy zSgW*Clfm5gA=Pa5YzMEF?WXsY)Ts=)On*nQ-??Q8`tLpXjVW|qhtAm5^T5opOpDiB zdbRp0h-?SD;tl+Ko&Mw-SoV7|eq-)`6MkDRAsVkHStw%VEj)sJxfl9gCjG4yfUT3J zF&gy^qg?^QtJN^XRi7t-=xL?V?`hixc0up9^hybIOf=gtISSRx%18FK{;)a!T@y_u zUglVgyWB?fZqIE(oU*(?^C4QiXj|U76HX2M2lXsN{(Z5tS3B`N7F)cW(khY+UftPC zFLZFpfg=M#cOr>#h>l)VMrl|6X^bpBi+V=>bD9FP^xG(O$?dV2W!Oz5xxB$0t2O-w 
z5$acy35Y6Idz@ji+S5DVe1>@+nC2S2YQ?;+xsl7~`C!nHYG>`)-7?4#*D;A{6OjmN z^fV^HCly*rWk5s+PVApyT7(#kVpiak9;w-%Mt7FwS$y=f``g+3(NNV}Weh(i2J3K^ z&ZX8iDB0C1s7Z2ph1*NU)899@VJaj;+v;FFEdmG+*-2>{lsRA-*|#40K4oCPH8&VA zd?-WK7qnUarWqhL;2?MV*6Y$l&ZXXsRo7qm8`aKz*1g^RygSp~(zUN}H>f#D_UUMFBmO7_9 z2Rd88)$pnCSHtte$A%|{TO3y%?>JUDraAgJn%aM{e{6rn{=EG@dwaN@ykvXJwwz2n z^lw5R^C|OobAfrBxue-?x{3k0()6gQx2dsmQ#r0|Ql3+MN*hI%zkkFnKB~T3tORE zqesT6Qu{c?hkMz61H9(hK@jf%_8v1bj?A;=s-*vbQ68ua7VI4uFa1YW3>XzhjfJY_ zCdR=*#;Lsn<8YE3PJD5u`{ETJ$$qFqEQvnwxP|;)(#}J2%riJd{q>b+B*#JF`1FhI z;>d=N23kmim%%d61{MabXuQ}$wyp#2^81K@LNcD9Iy4YDFW=irB@OOP_G)*IMBt`~ zfe|C{d?1+uEMCsax)*F47cWxDntMliwHUNCw1-O(+wP2Hq>z#9fj%ihZ?tm)v;0Gr z!=kks4p5c{lEUsN#g+NjRpRfCXzm`z!@_YX2$BELNDGH99p7&uGY_k2+R7mpv=ZnWudUGW{ z&yqV}CM!nGfMG`(Dylm<1Tv|)hEgBV=XV{GU%H=MD_SiFJ6gN{V}-E~8%rjzR8Tk8{XWNc(J3r{=L{`QAoK5|PrHKk8UW-_2K>(BCno!_Z*)dB(egZRQ4gm-avUi$cv7oXPwi%k{iW4=zNhK zRuksl<6h*>cBi_dTtB*wxc=vQ+BMYG()pkBytB-?kfg7Cw@rSAy{YXN+X>rd+icrN zTU(oK{Spq3U$8!G?P0BN`Pp*JvfeV&GR)FS{ZBoo?pBM{2i0WNX})IOZ(d{0F%K|D z;}zwTxFDTp8e>W@sW>n0!{g(RDH%93{vv-YZ;|K7_sJb(Rl1D#m6uCXrF5xbSdIGv zav4Sj6nqI?)tn5KyxLukAvr0pqskx4Kn0(Hrgui1mcRzXwVmh!lbV*RWM)nVy6nB^ zcge5$yijB7_?j=?T+ODA>WKbcdWkh@R2)edkIwMcq)}j=3TWmNbUONCEPBqvH1?6u zv>zFV#{klOqvBAPryoUHT1pfK-u2=%^iMmPK1W6JX>=4nJxBFG?<;aq8b)bE>Eg6F zQ&ZY-8591ECDT)onZ3scdZpJ$^Xd?+H_OL3=mBcY@3BRg$K?{TP0*ZvzSG5e0f(quEzqmV352O0z5 z^vC-V!F~*Yo}}b{FxSw`9lHi z=UzbCfy1=^DM;>48)PPLo*EcSPR+WF8t#kwU%nIbE`_vHJ!y|wGZ-<>{Rp!ngIuX7 z(eHgndC3(k;$~2;9ev{A)u^WD;U9%g9CQ{tHw07X8ggZ>8iy?)`*a{lN0HO=5vaRv zM^W`X!y5ni&uy6Praf8nhS?=!c2-o+nL$0;1T+%eNV{@gc;}?=ukk%l-hrNOA zXWKE`2HUf?5w>{iUF+x8a_d6tBx^UT+w#4o((;le&oap3!R1jE46+K<@oJK4Ggq76 zHNR+n+}zjP%=D}26VofE=S}yS+MCSEC1tO&Oql|Q)s5s@`9ru6oF)6@Ht;?5g;XIe zma?T3DJtwnXoA4~!7)6eUpg0qupi$3P~%9}f;6=Cre$7C^?NE8#!I(}w0Rl4@@+@U zjM=;lK8^MUpsYoUEM(he@Gd8tHZO~Z;=cz&8=ZuG9wixD5VEX%y@fPfu?TkqJJAiM zwpi?i58&(-i{fBj%s%r-)ho;5iK`hJE^g~G)O#6;DMQdZ&qiZq4A1rA0nXg;+&F0( zo?JW=M`|~tkG-`G2~URi2LwI!;99t;dUct{Ed7NEi)62z9Sct!nXl6Hw-%uLY)8)1 zpF?yEIxrlapzA|)Xv`srJHU12JuJsvc7Q9Q6a)9z#4HbPijhqJ?aN@@G@zrId^;4AonAh86*Fe}Ub>zy0Mo%TR5ybTl+g~g9n?L2uqooud_Y_M*HQfA zWTXE&DrDHN$mVt*nw`$SZ>3^yfoFdzwqvbukHE;E{Ni9#?%Rk}-Qn|^?x+2xaL)c% zGGYhCl@7-CWnn4K3?!QF{YHZcD^=zWgl{ns-EHPe2>;**$of`FYD#WWQh5tZ9>wcH zI(aWL&>zbyws0M7x$(ACJ~ezVk(iZ(lk07AJm>H^+BhWp7xvffFW4Wp_arM<&T23v zFa>;PIcOo4>6U?(7V4ksDfLw~A9u)!aJhTMywAMC{0My2H!|HcePr5fnr#|sYHL!I zuaq~G7nFyU9(Xw8C;6znUY;orlUqsuN#~_|}@HxQut&t~4mel6L}~=i3J59w~_w7A=Y+ zz4~^Xr;x#v0 z?z?VVPbaUWZFo|88Y$eiJ`N6p*ymyXnN0SW$D;%d%CRh)k&~M23>|W!Ikkp3d!U?J zB)*v2XGg0XMXqAO(80UOuVWc&TJDuzBzu~cV+NwzkG;qGBDO+|&Aq1%d2t{*bh0lt z%Y!i496Je;D;3bW9Fv!f_y|?I_tZi!DH}Ob)%Ko>K$3m6J^13eFS-DoJJJ{O*)0-A zE(OW-4JUNy%G$*it^X%9M7&mxGgCaC7i@71)5nT?F$!AUIgi>pM`G@r#|Go>arEWI zi%_-q5os~nNuy0^!ZrxPqoc^g7)BgNe3Te{`;3Le#y$jEcMOqjps7+C-va*G*K2+% z1kMP85K7)eVB9|}KNcay;Yr?-wTsbEc<89{V$_WFE1Z|sF2?SP&YjmAFZO7|kiM0C z*LZP^j3+6QURoR{%_SY1fcrorI(}YSjOTo|2B5O-mvlN&Ow@|J%OSgH*fA!EM8&sm=^A}n5$|_}=(no2EQ^yH;vpicKCAX6m>7uk(S|&}=PaPzq;LBE|qw_0O`w+_jrrid$ zs`dduuQXAM#1b@&Y;a&pS$wLY7Y~0YzE&FtEkO2}N;2#xu;^@EX3>;mcB~#@i%7+( z{Xji264S-}bsijJvG-$;n=ZUt3)i7}6G-7*C1e&&Oi1LvV6=Bo56CI(Y!w=QWl4y_&lPJu)zG!-sUPnwlniv=Qw5 zvQ@@W!3PZ_~i(NKl?lm$C%>GrVjd&8_r`)*eLMSq^9QCw!38fswF&vm0^(!n% z3lQE#*S&M;GW6U(r6&gpjX1v(%iIIG;Qx_Sx=tXbmL%prEY-sgW1})kN72IXVVg0ND|9k=At()roNkOYF!2@a56?9&$34_~RA;MC z&<7Ti7+gM*7U_>;CZ>pm@InT})%!6u&jf;8eR-6sJwt~`LfBn9Hse`%+KSyM@bUkH ztJ3w7E6+8^<#GPa#{UB61UmjnLT3Bs`TOiE?T^}f+Z)?%+K$^c+McudY;A0k^$Tl- zb+I+unqrN#{9ri@sq<5oA(mM6AN7p-nz}%JK(qUriy-02}d*RQpb0T*}1$YC0} 
zv+i0Pw17NkcVZmVJ=-&9ccOJagb#$BanSm_ijR!*GX0VfbmOm9?eu8T*cot;oZP)0 z=iCj~yxPf1db+&+8PZ6k$a5C})zIZvJ&#E7^j@}x)R6n>rg&2_y=d?>Ss5#b?I5Qj zYq9U78OS}&A26Tdr-X0&1L8Z{j*4hkF?aNl%Ugf6%b1Lo;QV46XuH$tdM#kkXB zh+HzFNo~BA!w$JGb_4U}hcI3!B!|6ADtka(rU~&4SrJF_n`0enRC_#zlr=54Xvwco znfc9>i>SfosJAkD@?nr$yJpdfK}Kcbaz;DHIPa<_=ymT3kCqeN92GIO_Ba{>?>9EU ztQyNdUQ+lAfLXyAtdq^*OAH$%_|6-|asGh(~ zNvBr^{Tx+}IrHrsUg={T6fm*KQKXX0!ilL~E&SPTLk5-~$2G*L1DJ+fYG|rPvk+G2JjwdO#}yYB=PZw{}RFPX_AW0p!qgx3P8Tigjo2?AzD~HRD^J zXM1|hSwRqwCB@9U9VdtRNtSj3_mlK^r0zhG4n$!CvsXKT+;=3t9kuc1cUY*B`Eg$D zH10RziSygz7VSJh92aQcpK1FF^}I#%V0yr1F=>ZGjRX{WbxCnrYX}BS-7SN{NM`Tk zM$PE-@R@U`bH4L_XD6p4{G0Ii!&iqt5#B!>=A0edw`$zl7FK6k;CBinshv82icyxT zT0vtMYHkCa;MJ6isF>$41Q>Lh?f@AyJ%}rJquVjuL5kak5ZGLAO1X%!L$^mCbOS;n zw}6ly)e=8ygt+gn8H{S^^&`e#TjKiRI6BbUYw=PA`;e^Z=t4!7yI#{(HXQpMi6yR` z1r~BEu1NJCc`b$f=A|=!OrHL!dypF$pJ{c(_z?-@wl{S^@n@PQQY^PccAjq~AhU zOly3f;^$DpS1jYx)&QOV5ud@el(R#We+n4Fe+P@nDXek$U zsD%&5`d6+U$K3KdM&_}Xui;!>hQ+HDUBfTqCQ>ugX_;(z6}zbhUsCWYt^#y$!;oJC zVB%X?v>L3t=9wl%Q3d!UDJjgo+r7&DjQf7K#dX7V%Jsaf4eqoTI6H=43SSc5&GD_H z#F1{lVP9+SkIQY%Hq?6Cy3IPu8fMvTx!+<@-&C_zm-#*Oqvi%sa++?sNBKy3Mv0YA z%X8(n(&yOERBeb!Bg&g+ms>Lz^fu%9WH+gN?qw9AZ@orLW@+n^xu6#(_`2hgffBgv zl2Xz+CA-fZox)_8l+u$^q~pyQr7@FSQbG?s$X`6N#@u+1QqnjjW7J$8M9bWUOK3_- z<&???Ai6{b8iyz;oKiC8UvKG|JFO{mcaDne$C(+f8^>kGedpD#LfND-vyY?PT{)%d z%$>pXXR`jY-A0{}8j-U+bm5ee1%6&sJfwIhk_PF_37h1{IYDD)Yn0N7Q!4uNI%q_# z(pZorJ(U-53&(1jnx4@R9rd`Xi@e&No-hp}Stlb|@`;=hS%G_WM$wpY84Z-6$5#1# zfW&OXD5(P{-HPlLh%aX3yDYx;oRTrHd4RHK5HDyuPLaOt!ZSzhy;Om@TTgF0{9_-h6-Np}ir)bFuo(tPKf!P?-2x0>vTI&Qno50HK<%C;R+o8N4 zZ=9-w2=bw-Hh1kbh} z&|mK+qw!#@8(~YzT+mz(Qqnd+$?bNHQSRZCq{Fz3=V4&J%P7tCFfmVY3d?L$jw}qZ zuZ%`*Ac-bAdv`eyQ5H;lX?`1Xitmw;-gQ@7Ay+rIUt)wtoKRgbloPa#ISQ@XCJP#x zJ6oMl>3aY9dbXq{3mWjBn`T7gXXb+XJX~JV)9_RC=H)p}3^k9?!t=BTC z@q$Qm5+e1aq&_iya`qFCPJim@)GnR6Bqt-r2p;3Dz4z)d;+dZaDe3i0Q_L1qX3RcQ zrb~{nJHy;Z-3#4=-A-zv@Sn@v!_@9UiEJA2M;z%m$rY)Z1Vq8VHIb?!An+_qq$;N& zh>Zuy9sX041M{rx{vym>oJpB|$$aZI=u;F26!k}>a4(8$q!rMFUI8K>7@@(Z7d5CgEzDHRt{j5xp4wzG= z6=sK&WxA+zHSJa`rhNH=Dbsqzx(Awc^Q9!~C`q=qv^`<@!`9RCp-FVIT>pbn;)hyi*SRM9>$Y0NuL`b$7o@u+k5zhl|DzYfc+I#aGn1{hPkz4eSqZyhVc9lVmZbz89Re4M;W3dggC+wQu|*5 zksM}-su%Ku=~Oa|CuMpNQ^hbDq$oC!_Y-8puwAV!JntWoPw_*~_i-K?nbLCqRIn8v zKYZ7Ik}+)8VRH7yG#JWIr1?$8Max2No@=3+S;n^vRM7sobN$?YM#ZqOP`G{4G7ZP2a+j<0I=Z=ihDjIl8K-yq{vl8m-`IL2;*^CP;J^`dc@RDsZn0XjxJTcBOjLi@*vqM*-uHUq%l(Cu!~{D6Ju-S zPAQrd?lYSto6Ty)f5emUOKcOCdvn}`Fg2ySEZiftMUj|sWxsUV%k?-@Nqh~J_pDMd33@n>(C zEyQn5Ey@dqr}1y96Msrkt|55F8~1vf^1e=sZ%HYdZlK>n_)zVbQ;KppS+BfXo@N5s zno{(nA#|nEQ;Z$KpU~+xR`uQR3`^1MSTo0+&X8d1vEp3fnMo&#*jivQEvl%(~LUo?4X7=z(V1bRo^yOld_ugAB9?zOT~i6d|pshk>K;T!7CD@HqiKE5K(2_%i{n(&2o1Y&tCvP6_Zy0sd5gKM~-M1^9#jeKv*Hb%LRCu0528b7X)~T052BcMFPAq0Ow=)`2#-y7YlHa051^W z`2t)hzy$&<&MWf-{9FNk9xyzs@)>ZBK$tDS&k67>0e)71pAq1h0{pZ9KPA93LU6vE z7y2RR3h;CR&Jo}z1^5X8eq4a33GibA{HOpw!eD*=pDGZh2=K!KJXwGr65wnB&Jy5B z0z6TG9~9sTVx$iUg!=_}ya10A;IRTcMu0~P@O@!n%D#+__1ei)8ka7SjX$k2>xCXA z@QoB;p8#hH@CX4OF2KVCc&Gpm5#YfBJV@-E_X>o80z5!~`wMVC0q!fneFQi|fO`vY zFCAu!Ub;Z&DZo7hI8A_41vo{3y9;nP0q!cm$pYL(T=Y5%giZpSB)}a7I8lHT1h|6$ zw-?}c0^BwTW6^69{1GO%7T|aRjuYTk0^Cx7V+GhNz#ajP5#SaS4lH`n0-?D8-y^`y z1h}aHHxb~*0^CS|8wzj(0nV>4{HQ0uQ34z(z!3uM7GReEI|VpgfF1nuY$d(v_v5BN z+;uwNwaRs$tD*Bt=l`4&o!;>4;oBipPjK9F?1jc|n*FZ*u-$JTY9tnro`mU1ZFB&&a^@2d0E-m1xb+`QC00uMf%Gp#jEFhwg@l~YK$5i_^KBc8N;U)w>21o;_2FTZ$2oTR#jo&DQ+5W}sZ6Y9i}S{xnZV71t8p-_t!)ME1>o#w$eCrzkDHWcqM zWFfMo&QNP+AZI*fI9y{WH8s%lx_@izM@iCL8EJAK_0>WjVMEzrh@j-7K(>)XbVP9t2g`MI`WKS zYmm-{iP>%O@oTW0;Cly 
zhWPWIRt+6cQPPTK0|(Wb5oSX+(j`L@z9xnXVU!`v5JL&C`c-ir|qo>0KD}p`t`SwxvmbTw*AJ~ZPFi$83?fiU2vc<7^vJRem-gA^mw=K=7%Alci;66UT z+*w_$uUHw?^Tmu{Zs3S|2L#IuU-D8`5Z{ip-+4Bf#BV<9;Syh^Es5EDHjzw6Y&xb5 z{gJG7ehpf_Tk-{9uk}WLDX&KI1TZ!WW zZK*5=56qrxVJ6o?!CNur$}Kqofp$lRIGs8=AP-ks^59jeqjh#t40 z%8DkpHUZOl*IBO~xH;*tE6^P(e+_kqd&q=cfd;5-7f|R>n(@-M`=yO4Euyd{!Gt8( z6PmKVlD+?i-dDyk_PligB?YAkWANM>r_9j&*_soiJW=rvQ1oZfx5D;fA| zu=b)zZ*(kTE(_LPB)Pjg*bZ<%Ru=r>CJ&Yc>NT=uu>M>u3qZ6XwVTdb$>-%~z5EVB zaq`mkejR7Ec<8)3QF znjVf?>!+jrQlZpevW0z$7by9^lwv+dVkzXS{RnZf!KN}WKXP(X(L`~I#b#iZVG8y= zIj#=fsGN`utx@A!Pl21AsZ=k0ZbLE7P6&jrjv8wSpHj@V z69V*G<6=ZQ%4bwgsQk;Y8Nosr(oUeefs96D>n=jYM*W2BrJoC{H4-qYC;0v`B!I*H573XSz5FBCVL~Cj{tuC-w{LFxxB{H58;5Z-^Cu z0)|u+Xad#0b`~OFyB%FcLC;`zkMVUP;3^7%<8{TDJh6t@W`ip!pbgl5)YtA;p@yt! z#au%nP=s5f=hn@EuA@LBKo&g<775`|MO;TA5I*T_g_r@lSt$-_DbO( zErqHpBkPorYboeSXc?WMXJ&{Yt=OoikS9+PGr?*hpr^oh8dMKi7zyYoMm>cps5XU; z&}gTSngT6@XWXko4iJB8u~APUW6OGR-%W=K*HZ`#g^G29#0+34-)|Vog$>Ka45So~ zH;}9U98)LwI0Kz3pb7a;DdtKFfz*9Zt}p~oE#@i;w2EmXZ@PKA&Oi^SC~*3%wi^xM zQ;Ll$3ROdPiOHuHhcpyu^wr(GV(nN_I{^g+-aOfHhWK%X!8H{0E)GpR8{aad)Zjfkfs8y>B9EE z7#cFI*r=ya1+90X6s#t>ozV#efZ3Un@^2}mu>Li!j-0WAgIkhiW)6FUl~ z3!|1o#UBO6I!Z0(N(!_FX#5!q@{KK(Qf$;x*!0jWF$LtHj+O$HB*l@QR?M{&0##i2 z+Ik@esDQNMkeUK*r4n2agk~F7LtITEP(z;n;-ZbY!l6CM$Gtb$_SugzC z@V()FJnPolaocgivCi=XI}}~DR~QaOkJ?h$p{UH7Z_UJui@#WoSXPJqM~_8Hmc^nZ zsb46vx>N3cMx7(Sq7Id3s?FsA<{G(?`JnW@d8PD@`5|eExr_9GS#?*rH@ly5_j5OJ zU3b0hTI{+%?4*lqDi{YtqxM=7tdu&FGv5{kG^mw+WaAF(_WC5j_KQmfl}x&OlgX`1 za&V750*;`ACy(LeGJfdW_hD?SS9NG<@bEH}IJXALLm0W@gQdaE^I(R_J|4mhVwk*- zLYR9Qrt(AxGmv7mysbFC=m`&Cm`aM#&oBM?w$ry^7T*e6!fsL?nN$vJ96aYGv8{Ph zFFagARiMrM@RhwGE0{rkvY{CLoHv{0rfOA)C6PsvVLhR5$2A-{_5E}Lc{0C#0?c1= znk>9$tIkmH%GuTA?4hN)BDQpBAL{6&q!*5s0YC@nW8 za3he+<4nudlZ6dlWGFxv`XiZqvu;pu+;paemGB@)&(@?qdIVRmRyu`+!NB9@^f)Sz zeN9@GYs$|E+YmbcRJqo;#=4r}{If|v|CEP6g-473a=h;-z$1_O&h4%OHkHsTSSMYwzR8w1~TK%-rP5wvTFBi)F zWUKUvv{F*B{xwEe8G)|k`$DQun$%;Xi@ zYU@nY{(xkOFGkg0H5B7lQ~bJQNl#yTwN{89WTRv$?+v4NuHPtFlG2Qat5iQ1ECp6F zA7U3(w^RH9!4hBT(0D@XC><9p1xi=)*oC^K(P z=W?Y$@T8rF=1uj7q)IgZFqo=Se@2;7RcBG!fW)~-DG+;J@l!(L=2U-3qC{ga!HjBb z9fLrL(5oq7L!IUi$&+Z^Ww$>p)Ezyz`Hk`H^t8dN=TmNu%c0&gOxuZPU3yI%9kysU`_EG#Yss&b`j~AM7TUD z5I?;M6Y3cBfH;Y#zvP57b=B}6|+^>c|5<)AU- z{hCy#rw1fTdI~=pbCBi_iImth0sGH7M+U!9rsSzRQF4(|pbDgy>P&Q8pacsWHVx6M zaUp)Uif+PXN`VsGs;q10;OA1MKnj)nGwW0`mnsG5xBfgX(lHo~GNnp+dmTC#D(T_1 z!Z)pki7?I2WlDiY*tD~!Fw*Fe!EY2QNy{%A5=iqKrAmdz#*1~p#wwR81&UB26^b!n zKHxH?Ko&CoI$k#mVTPgN`{xZq1gXU#nGzBq6`zb3$09wWbD2^g2cF&Y3=yOibBR(Q z2bD2~vnxCJaG6pd2Hy)tGlb$fh8$KuenB|*nC;fFVd+wmr2L{BQC2HcOqcPTb$gt7 zPU7}9*O}pr!kZC+Gtb|SkFBkhu9n-1#d1QvU|9#d&?jUVgULfIZuuVds`Rs3AstW` zNGsIQQkEKLDl`9S%6ETe%5-lrdE8H%kGuPuUvhVEXrAW!&Yb3Y+Z^s%A|=62^*_8- zwb7A>?NL4ZxAwizo*8RzWBc3o30@G$we_*pvwmyci(%HUd7MdE9X64`F!p|3JT%{$ z=?QG|a@i&?V{7p0ekwy$HoYC(M^DkJepjS`*yUku*}wRuZ>uLW?oI1^2F@T4Q3Om6 zzgZkSJ7sGVaq0?$$YLQ5J@;}j{z)4I@1fRg^(JO;n-%&vf-}*|D z)(eKRf&7kSzqlk!^WDBDB*mD<0(x%$5jasj#t>4QBf+>IWteJU0%yZVtZYZ0`JAfn z=y`d3iJ?H#S)i;wcLMR|FhrFn#KKMrIAER##`^^2!Ab4we`58jeVm1&xzV@u{JcRk z!#40*BHCe!b^&dZpuGrqnKlI#nLrLd_cA<~Q4?9|16sAZ6p+@bzVm;ZRcWFERY{Vu z^36eXW8PX}NDKLP*2^)0(mud<^uC^6E;upk(wp=}!793+WwQ_%J)3J7g1!M1=wstq zl}R)OuF*?gK9!m}hrZXmp(OKA`nfRuMSVl7z ziqTnGFhq5iMZuQ1Pd~+2GkKY{ng1wwM00YVJ8)XY1cygi7}}nCBwSWXo0C6l;NY%ZY)-u?72K2|A7&1!j={BB$>q@Rn1LN4?f&PC(l$mObFa>~y|@8OZU zzIdYUWBYP@rfzad&qd$CsVpSfK88ao~#(MQ=Evgn$0%MqmiXX zbEy=+QL0q(v%#Vb@f)Q|d8>@(U}=6XRSHb>zRx!cOAWO-^&7=XB{PPLd*>8CmnsF8 zfylR>G~_?c&!tL%_;1C0D#XuLQ=?cZQobw3pXTRMrC@mTHgPefm#BU&R|@2y!Wcn{ z-zZcnM28d_02N$EsOk2@Yoh}sZyYZBHwymsA0Be42YF@0@bgc5jO)6XZV9s 
zC6)tEsZpGf<~NF!N{;`Z&46E*E9qs3bb7@+V$&BAEYUK0*5-&=z<$Kffe zMu1i_h?TTTOw`6^Fo>1d!YS-K=}oXdiw&eeHEkMFPi!c*D+`E~^oGFWKgOP-i7%21UwTMUneo)F9d4KEcM&0|yi4;Y3zT`t7*Q~mcF!uuR6#7d;? zXp}2u_b>=ZQHe&mQpNf|>mIy8!w zGLo8!bW{uFIA5aZu8|SjZkEQ~o+qCA{J!bW?P~ zRKG4((qr(It*hGxT&fhvLH54|Vil+QxkxEcLAMHD5W~at)X#-Vf$))j!&OR}-zZbc z=plix1?2F=x!Y0IkSc#9LyeNZGC2n&v%-YR zYJdxs0-dbtv+u+RaE)M;DJA{bNu;Bb^)O_>bLSavJuWn%LLoC%mD^#h&kaux_PZG_ zHI(R!JnyQ46^^nZWGkR77vNu7sC|6(o889tZ_0xZ~;DS~a8 zGDzSNOv@C(v`o=W%b4k@BG{HG{e-}RZJ8q2mMMa5nIhPhDZPZSf@zr|n3gGmX_=BH zn4T(vZJ8q2mMMa5nIhPhDS~a8BG{HGuq_J?Gr_b>=_KHTre!caRRr5IB~f5X5a131 z++KhM+cG6&TNb7Wre%s?TBZo5WePPd3z(iNf^C`7Qb<6sEmH*BGDWa0Qv};GrG*ey zFfCIA(=tUcEz4J$2>}G#GDWa0Qv};GMX)VX1lux2uq{&r(=tUcEmNX|w76-Rt^q07 zmMLz5$tA!}0S*^nhXC6J*e1YM0k#ORD$d(xfnX9~MSx`imIOE~1j}~?_>KVoC&2&e zFtft^M3B)6H(t_`TR?F;fsQLCHHPb#?_1NRhzu5jVAF-VPqF2gyV`o2 zEw)J01?$(=H%vRMMMytPTV3FiqUmMVS$^3C_icl|3yQB-@JpMdFCPvnODtzMJ$b{w z3*HW~TPK-Rl!j#d{H{vs^keWM>ID|8;%<{5W(mW1>U|c(EM}Oh#vu$-w1#uR&^6&g z&NCo{@$;CsT?ocg%rJQwA)X?J@$|SDxPM&05V!iB3I>|bFok`025(slwPo}cR#)OE z(ALu{R9&wmpK{AC1;b{Bm>UUkV&1b~*cB;ORbo&q|wuGaLj%N3Og=tDfJgNlUa^;2+?4 zTDcsr_TiU$6|QjoBJ%;3zbaI%ejgXb5EZCd9lKtz{ z4qB}o0S~0KR+F_YsLeK9C+60;cl8o%y^Z@@^7K>314SIpf+r2#sWWu^Tam`;E%Ka} zg$MyA7^wvY=EgD1G5uL}nt zyQ-!vf^im(DC{3%LYQ&%fqD4-pDLPuUB(43Tos0&MDgq|Om@p;3rPd%TcCYwAKSL<~ew zacQj>0WKmIaw%dUkJUq_iD%TbgzU;bM2Pu9oM$`+TRUK|r=RU_EMWj=kP)RuFav@@% z4k}-GDt2tpBFs!(>3ZyA@Hc9*;7a!|*C(!xa9epV?(AnpMYUlgYX~1_lK8+ zPYF*6cRH#aZ#kAYCOSGntLaO78MGjYugPbl=$iDlO^$kqRN{?t)pi?EXOq*zPQ|I# z_JFhCSnqYfcK}Z!zVkcd%_{&-B>9oGajGK<@Pl}PPNwuIF5NlQEiog$C83^xyV5o;4$Rt>yLA%zoScED~MCW z`v7|%souICA*lo4QRMWIzHzGhD&UdaRgpOhu#d#Nycii94mgwgv4fM~KLL**65PkB zwo`zIm-@2e)UcZXnOifkX9FHeDpF-c-xKf<=2cU*VFwX4m^!qKHx~jNL@LXCajMA# z_+E18zM}}b2k=0WIVc%ba24U|pu zud~SP+~}EY`!q2}FsW4-DT!HC zza-N|vZ&9uQh0uat6d4cb;-nD2jRS{E4;7{FI)#F0@ML)SrE4&h?}=Sr^?}7m{V9e z+@*h=`ZrwvI`pqy|Jq3I@@RNu8V3&p@EYsIVoMQP?Z?#J2g6u6OMZ~W%n)#l#e&X@ zgewNdC^LVvc)p*yorUvb_IZFiq0Jm}3f@DgSKE&B;e3|5qa_o6^uvh-Aldp8iG%|W z*?NP0N!E{T;JXGlcVzibIuCqWMf8y4$>jio+@VZs(os9?E&tMV>Y;ZSDM?4oF^4}C zLx!txL&IrzycbEQsP5a4ht?8wo-PZH^=X#Ad{e;+1?^62l$WFveW+wF05 zp2&uma}sm<#wxOWWnv5RUglAka>0FpRONQ9N2cthZl%9rQw%Bi;1JwsFibVPFSSC4 zzrwyG{U`M0ey_Lk%GOI9vpj$yW%-HlPeI*NTfgKqR|s=~VYEBnQ-4~kX$0^px)M>7 zL+Nm9u?E4&2I`uMLe$w6qcLYyjK&;UF^D;s;#|ML?{&O1pZ%S~{?2B9pJRV#vA@r< zzcWciGKSlK87p)b+2L<6j2!+tr!1epEkM~rooA!(jbdMI;s_^R^nT`lK6E}GN0kYv z5v6aTe!}PTPa*#l&<{!Pfoak77Rnkvk4~ELPdqw^f3QC#6Ma5Gm(ELun(Cu8eU9UnEN@RJb)ll&*XSn*~XSqICiEhs`Lu`kQRfe zo0j=VX){Mm4T&$)9k4PBs*xV^MeWvdK+u(be+6a?CXx0werLzyP) z--q;XHv5wFCw{XM(hcs}O_MlgImKLm*aQ;j_RVL%0sA5I_ho+ z)lrKXPRp9U0A*+e5F3k^6IHDxKT*&@v+nW-F-tjSVhFQ@ zW6DC9MI3X7V&YP0DOVCt3gSs<)C%r`QfNidb(7M_ydZA)2LasWh5^2^UhrAW<5)~` z4`6<~b3go{%{+kl+A)G-mXFjiB;iaV9HJd)g-uToH!p}=J|VzYGbR|vK$3@?$!Y=n zB3u@xU7D}x@j4`{b2%+1gqfpoFyC{Gt5-18b&i=xFNGNc|h3f8C@b z@-5p3=->YOx1av)tAG2Dj_XlD)5}mF$?H)*_ascg4BAK+GX_lx;#xE*=QwzQWdT== z4X~eT4AQaHsPy;Aq1El;rT6&fUH*B8f8OSwee6RHzJoP_h1p9Y)*ws~`)H?zInk;O z)*!O^oKVO=1?)o-zwvW2(W(KBE=5(Dx01|K6xqCieaQw1or_cc$1yb_%u5_I+>a*g zPf+ZY)Y0bv(#|6_AiW7{gt_Ttq=Vz(Jeooh9&eze+CgwW#t;!4G3kDtgS$i@LyuI^ z(Y5fuPL~p85b?2tS9w7IGmvA-LYV#>liwBlD;}yZ$3%~ZQ@#e%e)DK2f2NgEf7)+e zE#YUnGov86ybwP0D4m?_jxoY<$1D$FW^&Bs z5aub|Z_wH7sQx{|z9e0y^V10APOa1_hxG43atkTTYCQXr1NUKO{DYQIZN)LJ(Lqcs z*+XNOSFkVnbQRpV?M19wIlLfpP?nc6qIOq6Ez!d*CiCVbqDKzJX?MY#M0mscAC7zs zS`ox04ne16xGOH(wR<1UCi5JD!b)j4IyX;L*8<-0peHIwsm6Ft~@0}%(Gp6t_}f7-!qBe?SaI)k-v z9%`Z26gX&ZIWIAmoC@IJhMY0AxE)|xv6Fh*1G`2YIC4g1J5Xp+ifOI1KwvhGVBx55?tE+~kKkc+8d?lrj6E 
z4zXs-j{%TuOhXku^iT({+46k=X|{Y90FS@$+8>b1;k455-ii?(Ns#C4r;001T8gl3N%x5 zEYK;P$r`XRYibI$Kdi-jt^M52vu|r(o~=nnmJ)av{2v|$iwq^w z={zD~!o63^_oX!JJr4Zf7jH#waqm$m=#8K{h4L;zyA-OGXyXXHDP_y?QfwHJogn;e zv~~nusKyBSRgvTsw$Fd4clOF8<5?-FhClJ%KYL|@*@Q-)QjJn+GgD-R!bYP+k-3VD z<)WvRdXMdokJsPIoyYzX{i0CqCFo~`0%*osUjLSe0$gTQ5?Y|dU-Tg52*?TiS$r&N}iquEQLy`#hgIdm{qPzH3=AXs)aqq%|Y2z;p zD~-Q|tF%f>oBP6I&FZ*;A+6>7F;&(o7bCVOk^H`D2`*1Kzo2=1QNhKi{S-MViE#c^ zS<*{v%V*khoYrQp#HT<{Y1t$t1@P5c+L(z4C%Y zm6AedN>u$u!AMParS`c(<{IyW-^_|Mj@ZQ&F@Ac5%r)0+o=c|g;zAy$o zG3m2Ok-#dOyo`5gFFtU6S8oJta`xgyw6(m#pZMk68&9>i_C@wA#f11$rEETnTaaF{ zAspk=NjOHx#JhWR(q8mXU%{J*`9G>G{)0q*%-T#ds0S~|ZP<@hyH zP|VviaTUz3R$wZF9XH`EboZyNH{mS|ZAUaok-m}$_w`!#sYE3}g-ezrLd|8qr@aLA z5#VPaJU+?G(2!<7d?+p1aJ6fcZQ4%qE9$gAjPRlZD{wGSWY*up z)qz6o6bi~pWBQw!Xp_G_6D>$+!qbj&-^@gXw#_2$Rz;e(+T<5SYVS?csoXzP*%Jej zsKX$9Xne&J0}_m|0^e@SOcLVH@*nha*~!?f#3m(Pp}fZiAb{1l*-d|XYygV%H3+xN zdTc-fWgNCxq0LO2)!VH8I=r3t)?_A#DD>EC5NC~gTuLhY97mX5gZqklOp(S2nXP>v zk;5WufpbsIs(ED^WR0RuhZLQEG*5M4lBZMitANF@lC? zq0k$*sRaXmvYr@7%TSjHXKyX>xq112vXqp{11{`oIQfg&Ny2%J%rL@_K05|Jsc<1O z?Y{A-I?`98oY^}b*OmIYrPWVDMwZqxn+3V_ku02#dy*;gY!>8R5@!{kBT8k-FJ+b- z>NPN6{krct-*n1dy5g4qof~6~e;bF5&Bg-b4x_K(Z}+9ljhCz3*tv8Fs=LzF82^K2 z6w0Ao<7p+Hs$4t-(VIzG81h^^B;J_G{~Ldle;?q#j{CV~op$FhJ)HjamWtX47dqn3 zU*etqbv7(qgFC@*t!Bc%inbg;XDO;eO2mtC z^z8>Y%DO^ZmMcq`&#Np;70Kfe+1c%Q=iS%P>Fqq!@dquh*77PXlPGf`Znm~)H5wxe zhoJD`5nR?p^tH-Xn4N2po{E%l;7<7@NFt)+wT#mejT{_TLB@5oG`W*);}CVY1&uhgA3 znXE`zgyhg+c|AE4zcVut*?@)mHhbbu9C-MnE*D8HwEB$J$}EP_k7)~rB;f<@EGVC2 zJu?;zNiZ|yST(Db%TVQcqZFnYvEQH!gXv+e5K=$qV(L-6^MRXBgr@<%N% zYWcmC6jM6HLlbkd!SMSDM6)I46d5Z>5DaR+JFzfg5qz3k+x<}Ejt6(kV zp0qh+x(o0|-;E0Px>A1P8BAqQ#PG+ai$a~4|9{7mw9q5~?m zI-AwmEXHN|ZMi91`)!%8HREG-uo`x{$OoHxIUbwTQdZ)jORdIcb1i1{qw{6Ed+(wdIvC}#9lPPr~T$D{Y-2K@`^(EYgOGHygEGG7wmpQmM!mUFc%q|?PH6+?eO zQ|3%Ds*2hWM(fTNqkIh8EHKq#tu_lWJ63`e4X^idNzDk*;Sw$-YBivSU6#qOOf@x# z6UkKNStv@R*8Dp>6Io3)EkZRl`++nJbMU+)jPi4v6r=npj$0h@Q9nl=iTX1ltf8m@ zQ2}ul%cm=UyEVGw6_jy}(de#ke+7kSZP3z^+gn;77TVn_#v};eLljyuCXpIeqOfH` zWl4B2Qj5hPo%VV_dF4mQePnDxsoHoS8bhv4IAiktmR4wK!u6B%a=Ly@M5Q|K$z46@ z#)>iLMD$-OE61QDI;g~lEU0Zd8VHU`TvYT25@gH(*ucPzQ4UVv#wdrB80Fw3VIL~P zjg@0i8DgA+No#}zsNjV$NyZ>4DBpo0pxG~sNig~=P-la_3Zz6?9(^cx6^<2Cf&0IA z6+fjCMW#l`LPg5NWVCx%jftlk%SMehi{&BRjjM5e{$Uu0!7&3Z=%8#^jbr+UDpL3m zo}2O+qDXy&3|1tVXZv7q9mf&=0b2H_qBZ#6bCr~o+#c6cyas3VBq~xEAw3nT=dO|R zNzf$X6Gt6PKK>J0&e8HwDJicRX*-#GN3-ZW{}%pbQkrF-a5Z<4e++-QL{!DKks?d9 z^K;4)d>)0hvOJ?mUWAk?QX3&9illNbuO5U+T~*JXRhn?Dq;-QZsmno9Qhm%SE%Z#EGHLzSY#5P_o4q*!ST?dd7a@fa6?mRtE&L_8q)GEE$`Iw4lSo>IhhQ1 zL893#dnhh<0gB>w5(#Ikl$6Ik6}C{Vtp)ySu@Gys7oWwQ{s&XOq!yTqA;(7+pGAAf zW+9uU{sO;^^I$F7i^!VPS~xnv8X~o8=bcTW8lL+n!R-pP*$=I&Gud>5DEI4#vh zN;B|ol_K@9mo0!-i$S>!dSadhz}(m1Q86DU)S$rz7*S)7*s%7Xj1O^{>`P5!5C@T3 z+k{-5zPobIn5Y4@?zh}CGA*ZW%E`0CP4o*=USWt4yEtQ9Xfix=-bDq3(BujFni zDZp(MWqDg$c53+sb$kV-X!Asj>2<~{DIB8}NzJjzNJYvlLV*dkElt@%5rT52_GWIpBHUCyi(zBDPRusurZ9XIfNOsCd_FIDCipFj7vvLU+9x$#sNM#x%f;ad>e zmRfCijn=9LY{N60!6R-jQO6lP7Pn5zwOX25R%y9LOEh<-|7tB)NlAqd;dfIMN4%DCTE=P_)UrFB!p|@KPiy&PVyvpU%Qsy%xv7A3IydP=#7qi3V-Eo?#4{yeNFQ}O6 z*{ql_zO|Hd+H1*yUFeF+1EtV7OvE?SWo^ia(uW|kD&w`%NL3N?2Su7AA_1S#Ji70F{V;BM~kR#Kn4vv`aFEdeH!Jjv2d zZEeOl*WDP2h+b~S1-lzb#zrM+7>16`UO)*H&dRdX*xH#|HP&gz-7i~?hN;o1DcM$n zQ>$#X=8v~J&7L@753}|??ltZUtl5DQc!jfg-I>ykcEF3;& zD*7#F7~G=3R2z&&gfSP*oUt789+j1g01cJ|0MW1)agLElBz#yDz)octgSDJc!T<@M zS2;Ici*iFbV;g7uG9vm;DJgv>irHfMYfMq3$|96LhX?fd*K4*}mBmE#cp5h`HwjT= z7F1Wf9DUV^xQT!QW+fT}5=2wMcwCHoxs+6Y071~%<8d$UrHTai+N4a8ya;(tk@Ih$ zN9D-GGi|>g@4c*>${xoBN^O^?O>2!dE2z!ZEcpreahCIW>whzAo?naXQ+{V}6uvJl 
zL80hE>*j}-z{2Jkt>tfAqBJ2e8wJZ@DEJ1#W$YycW}`64i;y0Q)LH~FhW`3UjXdLj z^m;A~8Yyi((uFRfnaPA=S0tfQSib~wKTb-U@cxxLmX{)%PESJtTV9GM_-{o*5%Pl~ z;RyL&65+m}<-cfJ>|&mOjUr-3>|&fdQISvvVjqh1R3scBXq_OH@S=4hWgN}hfa`5r zhi6f;f$zLpk*R|)qe_ttk_fS0SwbAxSC)0!vQ}A!UfzPPzaLeG^HDnNM{(}_layxa zl{~9OnI2ZC>=Kl(P~#=&A&TF)1P#ssQd0Mua8rpJae$U8oWp6E0C@zd! zi+$i)7xfI|B?ySHER&#|Aac!tidykkVA^3-f8>PD^kW}I;V0cV(@GY?xdHN zJ+7tciMbOR^69BgwA zVfW)~Gixs)MNDhPzNHv#)`bn|zqqkOq|6sRg6<>p1VH<0oWIxY5;GhkP60 z%3YBg6)AiZMf@oI%+#r7Fi>SaJ;DHYAsjEJcf8H+FI3IWd(yZS{ud{TF+IB6MX*BDhHl0{`YF96EP>K z(X3BOU|o%GJ&jij-Z6s=cD?Vmy~QbXuPC5iMs*NrCO?g`HiAG|uUYl<}ww z6Dk~vYD{YNwiZlfhI3HrZTLM{6Rxl6G|n$v|B#Zx{OT&pSw&hSRB`Xg!o1^;!QSqSOTL!39Nu)bHf+#ub2=2rxD2kXyM7*5#)sRbTWpuq;S5tOPe$Lz&;1Z`(<2do)4DO1K- z3k;{F&++-Lmy&9E6-32z)u^<4YfFl<6mn;TEr<(2>$GPPvT|x2Dv~{m5YNxUHAt=g z%Mxu3@YuR!!LKZqx(Hd+`79rQW975e=(epMqW~rI$e%L3#HOg|b?wx8>bX-@?rUln zb0TJ2%(F4~#te<=9B2uA5HJIe1u_Cj?c3TPZ(q}XQTwUw2eyxi{(E#?G)3n}-y9u_ zFOfF*U&dy!IsO!E7u)39=_~i$=Nsnh;%)JMfSsZr^JaLHJZ+xio*K_0&s5JqPmKHT z?m8?m%6H%Fj&=R$YKV5d?3(M!aizH2&L(#SKT7_IpESOO-Smi>Gzi9WvL-TEEN;#!X(H+Gbg!WlSfuT~r+_pfMF^lUwb0x6(<+AE*h4MpflEXED{ zG4iq)ZSmHlIH7ZKX#&;rG+C*obI^#rD4l~wl2w73j=lT<1I36eNL9EFm~HZLgf3zG z6%(#~l!;TAWmj^$aw4BKIz}Q9jZbgz*{hb}_!Ic-p;h?F*oT^J7O+{~r&B|?-X|o!i<(FZz7MI0t zw^^IhVqok5tQqL+B8#{>e*y& z;9o$9`%DuGrVgR4WU=X*RbdUdV)X0pTP$^0YJW>@#7Ka4D7UE)Q?2y3+MaG4floC&>GQv#8H&I-w7WPj2Z|!BdIP2`jHF(kdCsCWR7IfHJz|xwQ zqs@9|;~KP7Uy%DfP|6CB{?%wVCqIOuO=>|d0;N`HF%f+m^{GayIC`v<)KokK=jl>| zF!P(4T1y#?w#At)HE0prtb9GXh3tm~gWQ3WNt$f<5P~rZ3h_I9v#th1jXd*_-Gt~s zTbz>--v3fcX#tDbq2!${$d#q zYGZmy#CSlns!P~@&6+P^do^pEzZy-uk47X^-qG7hBR?9^!}vW7|7b)KuUlf#Q3}k2 zwahUJH07ZA$q5Z?>;Oj!bZt?9*0S##`$k|)b|h`thr&3>jm+))P#C*!QY0@zQ0Kij z0)d_rT7D)a)z3yBpyEBG@f}kn_^3@jRV0tezIymXUrD>`ag^vQq%@na-~vaPx+#<@ z%OHii(4jm>B26vDt8g?AvE3R=6V7jyEqyC~OlhmKG~qm>Y=sxlc^t*p=KhX`^VPf0 zYuTb@vzCaaOZ&H#d+;FYdAPv22MJx35(Ig+iUQ9|(0H8dUFor0g5=L^uvCIV2C8Oz zP1bC}^=DPXHS<0k%CcG4g8cI;@`}tgK3~;u=Eg7979mtL3GI5vx*|;8BNVsJbzrSF zE3;YpWcc-ajr}-m)&Dj4)pBr1YONf?VNJBdx{KF}us}a6OWUfZX&aSwLK|QDH!~|) zSQ>8@9^r;1tHST5c?(fu`dv~|Z7oWDwostX(n7YD$CEf`hjBr53o()@wT1v&+uBg!^!hRwVQ$d_G-Y)Wdj)4y`Xj z^zec2(c4;Ilz>Ux62$WIND0u3V~;HhhF~K3W_V!5;^v|xV}TTOs!NUHw01(4_9&uL z?#XBz%P^{Ya)L2ZgC~AMLB-%E1=<)~^iM{m$*Z7Q6aMaW-ai>bQFoy0{k?xOCK6mB ziP3W+0+prBl(6 zuJ5({mzL+X{HK=RP>~0BmhYXMLM0wtckMom5*2%JOY5b{Y(;ZI5win|Jf%|dwr{jH z>{~RKWr8XiE+!|MffiobI8xwhJT(f1E2 ze6=Xv7oGN-`SHv)bo^^#4&sMrt?qGt1z0QJgC{37b1WKuXSbn;-g^g5D%qh8D3GOA z#H`n?7ceX7e!Qeo+tQ!cu`!F~7guWL708E)w_b?HO@NI$xEq6QJY|kG;n+gM5AH^# zghvcX^((c7YI(iC^7*r}1$fPeA{d$+dm#zEJt<~MK~s4G@`SM$642X|JYVs0Ui-tG zsxHITORZ&|i`>ES5BvJS)jPgmZhFkdI+;$-z zbKa$?>0O=7*rY(pE(^S@z+47LyP_tKeu55lt-<^Kn3j)fIa|v|q@=QUn^7KgMGK;E z7qp%q?ZPYzLKz{oyblpE==ISqlpiZ8?xS5OVZq@1Ts3Ym>Pso9ih~u(a$1q*2x(Fz^|9`jYwOP5ryUeR-$D@;kDR=-kBmN6lrv0rhp>H6-o9?cAJ&A zpfO0n`Vm?c-ePkZ7~*$RlU!y6RP6k4&@xj9l=i-b$ek0v+gpe+I(KV{-%9>ptK}{! 
zv1tM7lYMU?D(QU3JS`6$Tv+Wk&+?gt2=ewX;pgh99)2wVcy7tMUSRRZC#mRw!*| zO@urpiSZ;ha6%yWN+qsP0;>=hc2lCO5?!RAw&H4(TkoQHA9mXXrD1K;sdsVaatlz^ z87|!5WGq&u*if3 z#T73{h?*Be_A16Pg5z-v)`WAtvgJj{Iz>_!oz>p0#n3vHEJqd0-5{xD@L(}(SfGRc^qcHuSAn;KF*t(Wof}2yo2A*$1$5%qhgiTJe#Gy zXt7#mZ#)CP@MV}sEVWu*(YpN^9Iw@8$?H*)NES-5{0eOruvxv$@=%!2sONB$aVv3- zko{DgvE~u}G2alsf_E4Dq%U(LZmK{~=djWm3-C*tm)(XLYYwJL}3W zVvZOmt`hBx&x|cbsgY|8GCH*Thg@+Lt5#fnQM4MR#P4{IPqRwS1#!+}(t!?@uEtPz zsg%^r9gU-h$&4YqgCiE_4B;Id zvGfMgTzfSSU@FrPZNlOk=`vW$K~mDkX$`2P#4OD9S%6=PHQ_i(YsD?_FRqM$I-0+pKnSU*#wKNiwDI zC(_})Xw-)I1wXbI&0Oa;NrZE&mRq!}(K4*%Yjoa?b2svY>x*uF8mkmp%9E_VchAD> zyHb$=PsQ3%_YGRRH_+C)Z;&l{U{aAmk_h)eWeIR7u2T%(-CxmL!x=&xD^}z)MZyts zjN-~rWY@GE!Q(7Lxi@txn#euNmiI7_yJ0eRD^Vwfd0uumCU6OUeghXNTyED^;rC)D zyzkSv`O8sbn*1B%J@@&3uX4|q2Je=WGxI0UC!SY53q5yv24IBaAMQQw7u*lKZ*|AJ zesUdlZF0>+thtZN>pbmz%eluk5!1~boLQ`(kzJPsKqK2Gn)@1GGoCN8m98helsblGdD^H<4G4cbdEP`<(@(d z8#-ePEE@(|c%uyoYi$^^Arf(Kp6zB>()TV*>fwl$q4W!ru!9P(ta<;!B=jT@%}^?} z-GUO4q2mh6(d-l7*+>$;e^HtbMR?UrMA}CCP^kOPE7BYx&5EQB+@^fcrs#{@;X8;C zcZ&(%Ipx(DA^%V$nYYZ~V>^Ak6^m)?^BFxIpW7<6A~_8~khV7LN;IBO;&CPBDDjvS)K-e1(vF5*@wBC37i!Gg@f`SHbhu^OS#)atYK)rh z;AY-qD3CuM%}NQb zqj~S`LX8-Z62q5hRF%BP#X+))N=Kw+riu5$%%)anYeAxbpmd+fdgn z!ReMMP-la31!8y)7wNG?fvF5Sm80s2;msVRmdkrlDvkFNk=l9Qtc*3IggTVtT*KD` zRPa+mP?{fG25(p?L8=7CP{kYW!ZT16SQoC+5THM+a(a}nQ#+NzuIQn%{0SZ>-5Rqv zW*T;F?ilzwuqW^Wbsg7x7-cU_zJuP#y*7@<8YlhKHRd38?)X%0k4>RsZqhyR6Q12C zH15ftcoN7;v}d0D2~nI{l>e+%ZbCDj6>Nblead;6g!I|Mi~ps`kfzGP>i>9Qv@uAe znpZIT0S}O071{N>7DXsrgDLZDL!E~1cCSL5wT-8cvSxgMeU5NJbs;Vz3xDcCHI@kz zK{>Qw9z@9TLCQ4`B6Ro;N`vVp@}2*Wn%sf;h>F~=NL7T)z-q7-)bQUQ!T95*miWqk z*WgKSYC-X8A>loxoB}-2aDs6X_259ftE?F}&^Y5HwvirWfk6t)WiSy36rLw3lTT5| zJ6S67xFo_eM@qA>JIDHz=~0F1FF}vcTGz=WbFQ2=nD)636 z!u%c<)X*L6!`WUug{cbUy^Hrlg1Z!$n_b{C6A^tx)Al|y9!jDa4DhI;hSL=ku6r%z z9!t5~Ql?o7wqxaI-LL=wx{dtsuadyPdR6A;3^Y!dQw#^Gr53wT`%(+6=A(Qm)Qml> zl`5PL^I^U!O8qgygkr*JXk$8t33S?8EPK;w9j)dbPAlB)l|rq~lOFQ}FwFAvYnJYc zFt28TOZ?m__j-G&+*voDb)Mg5&w_uC|{Fav7-O|<$v9$A?L$YcyhtE5N(reM=t(Af4>{_&jy@M4B$URRL8mJLZ z$}x8g;$I*T)5B$Mhl=_RF)2;c*P-^84#InfxfNzqqnEJBnl;;uPW8v8sT0sTN)WDE zGZPBBd%G;YZ`JWb==2eBtAvb|)T5iRf2}-*?{&>; zE@4|WYqJ@p{_Sd)`4y62%!1dTx^BRP(5V~_l3EORGNhKDit1Eqjl7&)YIPPvfXlc+ zpy`}}nn#+3545y8?_|xIIbJ2LF+8zCYI&D0)>kdLEo3h{C{MX>oN zh;ZysmNFiXQ1#+5nTdS9F)FA2x4q9Z%JCAI*oLtj(_Of0Fdc*f94xsvqa0mK-Vjr= zkj(;I1J7tVfL~<*O=~%T7rDQb*mmjPmZh&Ec}y;TQ<8vvrp|v;l4MrN0U;(d?BH&U z(6P3gD+j5KJGNPCExhwe$%YoZ!n{ticmry>V93(yKSoSKTJtO>yo;#Axe|1FIb74} zTnVN#?Lb*@*|`#oS7NVIwuN}t8fi=CZ7$hHVVhN1Ot_zS_4jN|KCS zO1!JYJ5o?{H#BTU{|)Jya?pqx^*8)bZz@yDBn#|NV6F|`P@usAw40+(GVxYvGPY2` zS0$+Ec%R4FU*Q?YJjO%GtPSI_IjQyLZG)v2Uc=QfTWf5;4(1un+L-nK2W~0)57he~ zxZQe=iz#xIB0=6ZLy<&DM079eh$@BBufenG#I;43_aNx(LZO&G4~G21d2@&iK-RbT`SF7uoE9@$9 zgnf+tcO#ySt`|cuW_?E-YI^ypi}%}snl`@W*q&g z3vzqwZsg-fx*$)ur%-+uv`jnH;mX)1+>;ck+GG1nRHQjV##2T&&SMj#q+||?jPJ&I zEL)NKQv5iIWGWKmpNk3A@D4iJ-EbXwHfv+Hd?2Reg}Dq|F%VNNLOfZYHD}ZbzKLZ6 zOM6frnx`5bx1clXSVTQ)sUqrA+Jg_P822eLLkX;?W{3sV5ERF2HYbiPk2epE<#B({ z*zy9(9E&o0E`O?pEmY`5XH#lrT&_xO%N#_yq*k{Ixv8@(vnNtVl-Xt;c8kHQ4n)bk zYD*cjrBS%DlvAyAs<;iu4?l%lVoi9yr=_>y+`AhQ7JL4-a!fAxmm=v;+dk(N3GfGG zRH|kw>fVB;29E+S?2hN!$Z=TKL^S3!v#pWGilr@i1>Vo%?)a^uu>&C63M1r?X3+)% z4Fw-51sjE;S+Woe1!9F}4VSPLR)Xc)7>gAfR)VDzmtKyN9v@SIr1V9INA^m`eYzGZ zQWYWd6=`O|nOb;Wk{@>R`8%l0&6_^|UptWe|L*ZD+rtx&ZALN3-E*lmbVCzBYHiFC zKi!Oz9;5hAH={D+kBl(E0-2RcGpoMlAW^SRH{)0NM7gC~M&UW6Nb3~sE^M$1Y)mq0^u6Nip*al8SzZ*3Owv_=#0( z%SL7CeVw*YGf#D!eFC?zk_t}X7Gij!h6LCd0{}yjQar1~GfLod0c;VHsyoYnA;AagFh3P-LZAn(()mHsSYc>6JMW6CL@bkde;Db8xkS5*j8W6vV)Ejpm$%VqC0TWa%=I!mpxsfb=+ 
zc0iw)!}jP6Uj{Y7tnzbkq!V;hff3w%2@XeaYP9Vw+^?#mnPn1=!nSS+#wv3-BtmBh zgP00cC}b?xa+#J(wJg`NOv@$a+GDs#;qPZDeU*ZJchI5R03(8LeKf1J8P!kfYnzkJ z^YZaDVjib?5h}`7Oyo4bmrp7gEpSbnBeald1>c5Q^#O;dq)xf`sF+fXx6Gw{cV-RG z@X>6iVpM}DvrdtJ4;IF&1I6BkdS2rO%p=lj&FxtJt`0wy`<1hUs3E~FRmw9LkBGAWiC@z)Y*u3h1sN@Zu{ zN1x|cm2dwho6dK+=Xs`h`g)>i;i0RO1Nw6c$D(SYN~5OH=HA!D8M0m6O@5Y9c>W-z z({nW!EPY0AMiW-9Nnq6s%1!k9j^2zmu3VL%9B#3EF`ExoQavo!xIJN;33VG@YYxo8Tn~Vuk zIL7n2(x$xK!|D8+rBXc?WFw}$jo#)pXbImmHVTUvUj zrRCWyop)A}eznYwq&1+O?xKb?TvH=|{NdBI2DIfJiXzQiWrv*#``L?Hes+h^5?MDT6I#kWwhH z>KX)1`fo$mjFdj&kS}f1sP2@K_*oq&THR1n~;_>!VUWA-hq?U=QDBEQI^LeiE z#eCK-w;czLPwiUL?>)zA$0NwRiBWA)pG9p!=FN>76x9J^w(p76;t_GX=w@@?{`4CsqSv~jh*8O#_?Y}< zZ>L)pa!NgIQK~TQ?Fte-_Qf z`ZXx8r8bUdC9F7$Fh`JQB}le*yv1?}+R*_h;0jlu;VQK%UgsyZR^A+nHR1n;4iCVq zU2 S3MQlCRb|32$}2BIe*Pap#{%O3 delta 64648 zcmZ5o2V4}#_rKZO-Rq4m9NZp7=^QEuHo%Tj?H!0>MFCsva#&-cf<&Ggbup%@QB0zj zVnI!ei4BaYCJH7oT`|cozf}FdnLW%efB4*+neV*mv%53zy_u@k?YgR}zqK{NK1+-$ zmF*YR!#|Q*8{d_piM{R#h?*^pNXd-a3cI|u(MiHQQma;L;mL^ViLV^9WLrtv%Mx!@ z`c0HmRG`$MMR{T-f@YRc(E`*jnF2_@g}e`)*&XhF!HoWieXL zn#0VeO`T0G#%N=U!DRSAzgTxvKUlj{H-q%jmUE!(uK8WlLp&y~71jzpRXYTthROA5srEFO_eIhbF~F}oXuPp6U`^iQ~5bpH#}HpxrkkCTWN%MHuNmaCSF zbWj^DT`hN3XX1qDy&9*C@#*SuDwV#jI$EXD*0~>5sWhB>ph_k5aQmrL>f9+va=ISN zRFj%jgs4(fyHu9b7O>2=^tPDHH_V64Rpvr-xanupSyP>9im97PZMv*#H*$vS zhF1+w7)o{bNt*s+)h_)gZZFqccTiVOKGNRPKCX?@+|g{%gp1e3g`!3HSlBDf5_onwK1J{#WBfGKSr&K#3yy{@EXwXjy)sB-}t~!WaZv8QYSmA!>K)rnQ697tsA2~=VKSk=u4w~`; zt;{cuq3PbFjW&7$f_gdKx3aZHAf0Oq%tEhleq1P2Mg<1a1_a5W zSa+S!%lW@(pgajty}$k`l|N37fB(t*v17&v1Hx!|w3^<2{imM1p^vH$?X+~c73*CJ zwQGAti&35UUQQKM-gE>)QGB3Y>>R}RickPK_qu}^#Hhf3Sl&D!>BXpke}I}Rw9EWy z+biG1&Xtxw7A$o4eBK5F2&v3Y|M zeE4Bbl`L(vlNd3<#sa$^r-4K(pNlcJS)oLNmA?n*#Y783$mb3cD<+z00IFsMN^$}w zz-SXgY4+&0y>M)9G`{dHp8VC_xqqgspRc}<4+kytKj1X<|8U*V^#AE-f{#J$56k^hn{q>Jj5Y7Blbh{q}E zb`!bjphTR|P&VHOvlI)^zL4%1|hAO{w9yT}SjFO0^*Xw-Zb z^&X9qrCAaQfsA($zK(E|>_Qp=O?#GDVf>%8pm(sK44QTZ)|<7Ek{ z6Pi!@S|M@&XoDP&g~iJoC@MS3uzK>73~6V4HTy@0$bT_lEK*E3uQ)`Wx`w7{VnEHx zM{V+TOsJPHGB_Hk6SmANv%;_gSU~r+l(~kX1>a)VIIQ+PLd1IKSVx{e(cfV&_V$ zW%p2tIH3KNg+AcAi22@LPrEHsPR7I%G+e~o@-|Q?!wVHt=op1$6H+HgmkKBz+<&_xjA?G5+wmnjTV{-+o{VRr}_DB-pU*QY>a=cuOZ&4!gJvjXm zPfv*lEoa*1c|V$ej##P#N3nIn>TI@7fFnL)0Qe;Gw>l}S3MqO{>C~Ae_|3;JeD+8& za8eJjh4VpBZstUfCys9=gg{~D^5Gu*7UvO9S`sfM!ZpXFSlC4Nh3iR)N@ZFW&4H$0`c>fY7GYn!w)G+%0B#je5|LVrF& zeUKw$AW^CMVxhz9Eq5Cw7weA9Rq>!l1025d&rNl7sZ|%s8TNCh+oO1CD4C-AQ6#RtPv( z1_a4T$Z6$N$2`HW>Tf?z7r)&DEGw}WY`?H|*&4Pod)4v+Tb(`4)@Qpc7cKRcbCyGv z8cUO9qvZ|DO1fejVp(PTX=ikQK=01tEZ||x|Zst zDwQ~_+Nx4niq}-CRQ#*%S;&8H?FyvrYtxbcg*7=i3zOD}xaJF6orN@XwF7IFL05sA z&DAMb%1L)W`mjs7)6>!{+`3 z&TX?pq2b5JLxg>}Y~2M%o*o?vjgJTZrJ`oQ2Xw6 zJ4mlPgZYWrN~rvymmMrYU3}o~6+bt8wpQ8nR7ebkc#4Or9Yo<8e-H zwd??m)#!xb*5!lg;}vTGX^)+cRSdhQM~`7AN%uc-XVD3$rDIl}lEn43%iM(!D-Xc(sd)v}8G?r}Al7H|f4p?i`h zu&|J;HpqYPb&zaMbsJg-8iVCT4Ee*oEMpj~JgNzVySchfoD}0(a@I6j!ImI}xGh;0a5iioLi>NeS;!FLCULUO5kbKliravf^y#{fbeYpZq8q3ERY5mh=LDw2AT zSNg@*6@M-2>+$t4ECHhlRPHSfufOgP1$=Gm-RsH2Tu;?G^06gNRcYR0`o>gb%rt2= zKWd&6u86hx=ENVi72J(tcc_&7XTT3J{tuuk#($iSzoXL8Se57dSpQc!-f&RWMt6Uo zdlrZLFIRiy1pg8atER#SkgJ7Zo&C?NM^=x6wwLvG@a^oM-c6_;qY3Qe*AkJ;I(M~^T%BNe7k|SbW&N~v23!8ut4qew}a^T{$z#`NdZX)y|d(zDN~cXcI(c= zCImdyNdc=eyNj1_2H#Z~dl+8Sf2H4pGx%HW1kG^qL*bOTLFmSBQ{Pe-a+}FFs=JnM z=GCU3Op+0tuE0)uOu3i#iFgSLtjpQV&GRf<9w6h)ofnY7>x^qUdhLrv0q4~ayfR?0 z=M%|aCMHZqf65;mSQ=0RNvi@{JUy2M^dtK0>T*qB?m&D;!>SblfpR;(IrTiYdNH&t z53tK2`*A}L=LZ{2@a~ELA0-}ct_TQLyz*>hh&xslAhr(NsIpXBI++{I{Y=+P(~Q3v zHyZ7RLxxO!i~bSa&$=}_f9>F35B2eSk zC4*ZK0aYb|kx*17TA`*}UyWyAN#L7i%W`6x?G|{gN=w_LRW_nD9A;&80dF?-|>o@AhlIt*83JZd=R(lu> 
zuet4~S05z_s#f(uoL{Z4hPXU8TJ0ZkkQ_as2lPE_kAuK3?cJdw%%043N`_U7>s_8h zU)qZZ*v^C`6C04X>@H9FZTsUKSdVscabj-$d3&;F(zo{i5NJQr$tCi=lGUXgw7lFe z-t+4B_P6k@bJ3oRFYLm7om}fb{n3uUQ2d2?KK;=V>qFAE2~vNvR+dJBc!Oi(!>G6JM-<9szlKS?O;GC@I# z-Eomnd^O&xA(sCSR~$-2*xFMlOj6RN5W0A8Av;-h!^Bp5YUUpCdv_&xg{U8i&;?-@SuVT}Vl%D3kfz zby#hXxV%^|cfX6GG5Jcf1NZ6qw;V)*%yB4uGx(t-?;eI@<)aLxt2lYo7Y^JXh9H$t zM%T>>C*HlTm)~Tlo1*ofW7_iFUs~lZUph!<`D+TwbI?CX&PD2gI~h6~?0Hqv%Rgc` zUVa6s1df12yFBVET3jcjxc7~E!zv#}Z>qeC0qx~B6bVjgvC2`mY0h6MZ2hrTYY~W- zhar{Z!zL#QmcK`ToRBj7VS87NIuyc9EtFfZoni3ckkbaKZ+NWo)~{(^!;wnR`13Io zK)>k`<=+_k8d3)&`Aas~;(1mtk3~6WoEVeK_eZhNX1~Y~kWNv_S`ID6{ruq|sE9g* zbsXl@3%!T&69U1Oe=P{APT{*L;#&t}VN!aKFJ_nF+D^@Rmj^Q|Yst*V)Eh zI3Mz-CFx<6Ia-uoLa?)3#-bV%aOlkbl=poGG~JA|LZA0=W-MfAPoyYV+KV|P#p7(e zf#FzrFGHn7Ox!r1!4VGw04#eg}>G9EEug|HAz1@(xFVQAjSvRae zj?S_UJ3|6Vn_!h?^rp(?45&E~YlSTdSgfrRE%pP3R_w!3n~gxK+zTnD`!+UME_lOE zMCiA2K$P6T&_judp>qFVnofgMf||bAFVCYlUYlUaATvA$$E$0UUe2)7!~#-eOTx$JdGyB0vnf!Y7-Lm7i?Sh< zvb8Wa=}aU(R43^icogBzau1|f8M}#CA?%2xmrw2v#GS?GEOIwe2RPEBLgYSSG!r#a z6xkk+w8Cb}T*a7QM6|O!lf~7H!soHS0hv=AR=Iqg8KrLJ4w^>_Qc14aK%deN z&>tu7M2h401V!$(;*(5Dykk|I8flYr+vqzW6sZFa_y03QK7rnNc{Nh({=wl^INa-3 zs~m$bsm}7Z3@x~TLeEtQq-gZ`iX5SOQ`WTO8)!P`Kp`eXX7`nmd{`e;3`YbArp zM%@d#3SF8`r9H2$(T>$RG`BT}HH$QTG`x60+$xR}CE;s)`93Q26$JhwU&oKP*_ujU>U)9Fx>f9&*n?`jK>KJ|jf|d1162 zGNK}Vpgh9)6lB(Q#XWOMj2YL<(IF5S>2yF--@aD3d{#0*RZ}mkb$6J!Yb0pf?Rg_zQcaR8j3JNbs5#%_Q;B;zFM({~dTxJhAeNnQL zH{`j0P@dP|Ch};GDUpzTSqjy>ss!Y{BdKgs^)iq%$fEi?l24A}WEs&gQS~PcLGqSC zzR-M8GLhfqIe%g^h5s_VqyrcJLoz+Ul4bXJY_h2OP|22~oYx@#vJ|Pi>SgK5a@M`=4JIoE zy#*(cNoIV*TfrBgI&M+vLlNk(|5l$47T$$(tj!(wp*tJ7esxyt; zfu>iV4S|MKe3qU~buJ}8C}UdCdx#A_zbYBXcM8xNKFS6cZqTIPFr481b5Rq?_V+XQwo4wr9lEJ~8>WVj1(N|-I8?WYLc9*?S>y*J( z`20g`=p`xuv&$~j@&&_|Ue0uK(c8kOXwxq!f!2wMw)$P;^n0}QwNu)=AEFQusP4 zVx1JZPKsJ5bzUcRStmuWlVXlZv3w>;1)(^0oF8|ZTVpwad!-2TTjurV0<+0<)ibX+ zR>zr9E3W)0E9b&1NfGcrOKgX-#AdLv#15W}i|$EK`TRb{7y)@V4*Pk=Oo@G62N`>^ zrofdqf75uD&WSY<@4kQ~R`vyUB{4%9{kug%5gfgDa?4xm}j_cSZN5*AJPxf-Ow%4 znYABi=b)X%MNOS1Q~Xg}i}L&T@RMTUkMkpWqTZn%#NFT?=OV~ea)>-eoT}GJ0sceQ zCLvT^>Q0y4s6-2cqFWBs6@4m+i1qVgRZ)X|Amu)e#f9fFe9Z4UutTRt(cB`Cq9W7#@Z&&ZI8J_? 
zq4?-K`IHKh5m^sZZ^BegvfgkNRX!>MXloZ*qf*xu*$|5TE+#2(r>9e z7lAnJ`C!_W%t#?KD$+(*QMBtjd3&+IlIa6|a6Kja#wz?l5BZxToFT_r<4`w=>InX* z5I>)U88t>?_|xzo!(GF-hA#}C8$L3$7%mvzHk>lNZg|DBn8YvRJT^Z5Y0Y>+ZQ_W7 z(J)3s8MQMS%xD0k{*3xD>dUB&Q4^y^Mh%SW8P$4B#`s^dw9S2pTvGN5ue3qCZidQ_GPpWqrDmJ#b{4PQyEQRwEOzB zka$m8NW#UbJ?*Oha{9;gPTdi-UO1*5ZmF<{<~`;_(_5O;nn~gou?xzjbNN{QxcIxe zNj;YPhFeJ{kRMh5g}zk@ud7R{gTXO9@e!S=I!G)<=}REodn+*>h7C#z2PT85I38=Q zUz{KqRBlssCofO!@4@l#S5^m))tqFNU1uyr&`qxnK%Q?I@kjoQGJg3(uq^M67zT|_pPS`}dH(mSjRq=4+0d@oZP+^4VjkwnxGrT+uGwk>( zn${<6PQYKHCuwui)ixV`KJDr}m6#>82-Eow`2L`+a3y*&D_m;STbrxHF{dt*0--S4 zl?UvjN55{^4WiCm?es=AYLnuj;2@U&@UsX?Cpf$nA%7!BP-$Wh{QMM@V#`CJsWh>4 z{fmoSo);G-k6jXh73rpWMuoE6gO)KC&is;jtXXY3Y#L7nm_*c3Ofu>~YEK^KDQ{2S zX%I%Ba-P7$@h5SfNq;3jNANpXS_m!g*b|_6!_(m!QbYj`O5VMcxBkHWWY2;7T}KnD zrHJBj)s@$LKVVx>x9&%WciSrnh4E@kOj{}x#!>IX?1jQu8h$v~>wgftd43SP`@cvP zgI!e{K_k5$E38Es_17RZbn9YzgPX+#llRC{607nSpI%2MiVSTnfY4?&wcqe&X9L9IOixf|wLU%tLJmW^v?Y>}rO#f9kM`d8h18`%B?v1jJFA;AYxt|MP)MFNQR2Ju^5;Dw%5C7i@7n(fA3oq!Bq73 z4FD?a5p$y%>TZwqqK!t1c46p|vKR%GYo4A(C|I`5QgSwors1hd2fwmEkW_y<55;2l!3PM z5ey8h-xOsN>QCAxD6Tc4nCFoT^ZX~%YA{;b{{K82tB zCy>cT#oQo<7WN7EqKz|gBy$59YBOOe1-L}S`doKfg#f-#HvzKR!W3QG~#^z`d?l&&4^-bKvyQHrU@#I9F&1VQVJ z0mx=!Xx2SMr8n)!W>wN1qph$of8p;m^2J)L*xC?GVy0+a*7qz$AQYpZSj;t1aNFr% z%-py;2r?fnMouG3@2u{GvvKT+7Q(8?s9Gc!nv^Zn1 zRnW0Mxc52s;y<94akliP=@%pfLgT7HEMKGKTK{iURM1w;^?!H;1^wdRD9_N-ga3`H z8Ct0OcfK4$-QNcOD__E@+xBYr?0U({^U1146-Z6%x}C%MXI4{Wv4Q}v3%*aSj@{;a;W?c zjS*kz9$0@m9UGU&z>;@pC3Bg-%{nj~s^)(eR3DD7mAo7!)ldByh#ftQ0(HqhD@;Ha z(_-FGCFz3i(%G_E%*$rLy{L{V-~t(|FodDDZa8YJV$A`x5rY|e5G!Z!=iK}rtq z(wYrq{>r7_p?`qlztz!|RZ*G9-CsfHc~KMvBG`|CWF<|O#r$oLb};lzM21WTo?eEN zhSeuC#))VKLkquBJ|Kb6KAR4BUxvCb()#qF{<_m&c$2j@^hAca}?6 znO+Rda;AIH#{0C}dor}Py0aIp0ose{483Thb0j^07d2 zOT%9}{(9oC7yf$VuaDKcXH_Har z^JNB-tN_+?b5TNw0Fb|q*C0Yr*=e8>zv8EIQ2dpiVIZ+e;mgs+KR+~4U*9t$oy4G|4b8)>W2fLw zdCe?6N1_#^re{V7kj1A_nmw)9&Y^XX!dP2^_P38wMwFLhQdvr+CchlgFpzHd3PB|m z<1E-TE11J&4+|vi8*i1bBaZ<$k_>6TP!@u_Oxhh#UQT5e8d!4zkHfX*8P!!0rFk1G-@Y@ws{vyZE^#`26-ORFsS(sn{A;SSk#p=>^B@QXb2pN*1Wn>>?)HI zmpKW0IlgwBffyB_W=|>FhnE#23yw5RUSKN4UPJjO?H5TG6fCFpmPk69KPXLlYbqK# z_Ze)ED-jNZ;y!~z;MSd9p?mt!hKovn{n*P+1SLUp=Poud%q`YKWl!wD_R|w>P|LH*moU1R(M8~s zo0%HP{4*Gx&ge8oOBtQYXbF_%W`-3r|5!%HFglvik&F&!w2;vPM)MfWVRRUyLm3^y z=paT1GCF|Ke(S%>&BR|8{;0n{{swry%N-b4&1-*C4FREfV20jMzz@|m@j>d_FtmB# zP|x<}fq7~$efUN)3e+>u@nl~cI8TJi$-ne~fYNcXV81@FJ7gpdaB;f*kgq=JTL1F( zf%qHbdHMR_`~mcBI1B`7$XGn2GEo{53Wd@VKg+jj4|hVnRy{)PL{a8lYOQe^3f}A_ zfGasK&a=fi#HoggY56Wu9KlROYVAM8gHP0u_uz7QK9-dZ6)Ou}p1z%jbm8Q-SA7U# z@o;)(zH9wE(L+4%L}&NxL3f*DNTZ4yWVvG5jVGiu<`2yK%=65dmUOel^ttJvX&Fex z+1os?7iXU_@-B6$vf1S0&B~u;gq7LX$SOWeRiN6h zvb0#9w#>H-u=tyAn?2?Y=5gj&)1RjIOi!80Onppd<4t3uvC3FrlKt3JaTsvvqz zwu|H5gz?OM1)f-woyc(q;W%@jgCCgN0QR+%Zztq2cPmt_%}(OD^>75kMsAsFu&$60 zomPvs24_PfEP9UPDw76LP^;DIpym7LBFs9Do8}tm#R#-48y+Db#!ZC9>u9ON;e~ay zlpOe!h5LbiJ;i%M&-E0Kfif1x_g`#7BJ5&;P`JokU-*kL86j~4W#*uG112WFK@D?n zLlfdg((3B*um;ZPvk?|8_R5E@)Q4c@=oJch(UtNKxadBsD>(xj9>eTWRr3aO55N!1 zl_6{+Ev^poHq!WwP=&6MtV&G8Ot53H6G%)$gh?Pv5)&wdm=2t;4~Z~o$%4fAf1odW zP^c^^4k7GNtrnQFq5SdeCygXKu?x#jhly=PQ4wfw)F-j?L#Xh^ClpFajA9Ti;>e@1 z5hjr&xFQjvqk%*8?V<=lgYE7D?>s?k>ww>x>j!b0Xe;zEfx4>u@Hh(-c$0G|_AT&F{+yT{Pevd$Df~f<5 zdt={-&p;NWb3=qt z*Ed1?5V5UD!jVl$&~zX;yQ?r&63W*)M!z=hI@Tv9L3oHbJrg60VnhP}5J}FcK*XM) zrU;uLErZrZ7&ZP0+(RDsvJ=Q-P9P5v=l+i)(llCXV3x1?ScPXy8Z2`yeJlp^b@Ku9 zF!M6ganlcGJDR~xG{qYKFurHpZk%CEGip)3{T!s$WWUb|X<(?u5ftK}H**tUPHpxh zoM49*44VXBm%9@dsLY?rMZF@dBF723M0XIY2Az)1FAhbhMS+89TQMj84*=%=1m~Ii z9sIsEJDKCZfcQGyog-^R1~%6s59mi7?D 
diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix
new file mode 100644
index 0000000000..24983a1e95
--- /dev/null
+++ b/changelog.d/8386.bugfix
@@ -0,0 +1 @@
+Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index a34bdf1830..ecca8b6e8f 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -89,6 +89,7 @@ BOOLEAN_COLUMNS = {
     "redactions": ["have_censored"],
     "room_stats_state": ["is_federatable"],
     "local_media_repository": ["safe_from_quarantine"],
+    "users": ["shadow_banned"],
 }
 

From 920dd1083efb7e38b8b85b4b32f090277d5b69db Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 24 Sep 2020 16:25:33 +0100
Subject: [PATCH 043/134] 1.20.1

---
 CHANGES.md              | 10 ++++++++++
 changelog.d/8386.bugfix |  1 -
 changelog.d/8394.bugfix |  1 -
 debian/changelog        |  6 ++++++
 synapse/__init__.py     |  2 +-
 5 files changed, 17 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/8386.bugfix
 delete mode 100644 changelog.d/8394.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 84711de448..650dc8487d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,13 @@
+Synapse 1.20.1 (2020-09-24)
+===========================
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386))
+- Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
+
+
 Synapse 1.20.0 (2020-09-22)
 ===========================
 
diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix
deleted file mode 100644
index 24983a1e95..0000000000
--- a/changelog.d/8386.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail.
diff --git a/changelog.d/8394.bugfix b/changelog.d/8394.bugfix
deleted file mode 100644
index 0ac1eeca0a..0000000000
--- a/changelog.d/8394.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index ae548f9f33..264ef9ce7c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.20.1) stable; urgency=medium
+
+  * New synapse release 1.20.1.
+
+ -- Synapse Packaging team  Thu, 24 Sep 2020 16:25:22 +0100
+
 matrix-synapse-py3 (1.20.0) stable; urgency=medium
 
   [ Synapse Packaging team ]
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8242d05f60..e40b582bd5 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -48,7 +48,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.20.0"
+__version__ = "1.20.1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From 5ce5a9f1447088bc29cac49f4a0ebfab6c0198d6 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 24 Sep 2020 16:26:57 +0100
Subject: [PATCH 044/134] Update changelog wording

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 650dc8487d..16e83f6f10 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,7 +5,7 @@ Bugfixes
 --------
 
 - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386))
-- Fix URLs being accidentally escaped in Jinja2 templates. Broke in v1.20.0. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
+- Fix a bug introduced in v1.20.0 which caused URLs to be accidentally escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
 
 
 Synapse 1.20.0 (2020-09-22)
 ===========================

From 271086ebda55f9ef0a0bdee69c96d79c5005e21d Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 24 Sep 2020 16:33:49 +0100
Subject: [PATCH 045/134] s/accidentally/incorrectly in changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 16e83f6f10..7ea08fa117 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,7 +5,7 @@ Bugfixes
 --------
 
 - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386))
-- Fix a bug introduced in v1.20.0 which caused URLs to be accidentally escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
+- Fix a bug introduced in v1.20.0 which caused URLs to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
 
 
 Synapse 1.20.0 (2020-09-22)
 ===========================

From ab903e7337f6c2c7cfcdac69b13dedf67e56d801 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 24 Sep 2020 16:35:31 +0100
Subject: [PATCH 046/134] s/URLs/variables in changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 7ea08fa117..5de819ea1e 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -5,7 +5,7 @@ Bugfixes
 --------
 
 - Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386))
-- Fix a bug introduced in v1.20.0 which caused URLs to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
+- Fix a bug introduced in v1.20.0 which caused variables to be incorrectly escaped in Jinja2 templates. ([\#8394](https://github.com/matrix-org/synapse/issues/8394))
 
 
 Synapse 1.20.0 (2020-09-22)
 ===========================

From f112cfe5bb2c918c9e942941686a05664d8bd7da Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 24 Sep 2020 16:53:51 +0100
Subject: [PATCH 047/134] Fix MultiWriterIdGenerator's handling of restarts.
 (#8374)

On startup `MultiWriterIdGenerator` fetches the maximum stream ID for each
instance from the table and uses that as its initial "current position" for
each writer.

This is problematic as a) it involves either a scan of the events table or
an index (neither of which is ideal), and b) if rows are being persisted out
of order elsewhere while the process restarts then using the maximum stream
ID is not correct. This could theoretically lead to race conditions where
e.g. events that are persisted out of order are not sent down sync streams.

We fix this by creating a new table that tracks the current positions of
each writer to the stream, and update it each time we finish persisting a
new entry. This is a relatively small overhead when persisting events.
However, for the cache invalidation stream this is a much bigger relative
overhead, so instead we note that for invalidation we don't actually care
about reliability over restarts (as there are no caches to invalidate) and
simply don't bother reading and writing to the new table in that particular
case.
---
 changelog.d/8374.bugfix                       |   1 +
 synapse/replication/slave/storage/_base.py    |   2 +
 synapse/storage/databases/main/__init__.py    |   8 +-
 .../storage/databases/main/events_worker.py   |   4 +
 .../schema/delta/58/18stream_positions.sql    |  22 +++
 synapse/storage/util/id_generators.py         | 148 +++++++++++++++---
 tests/storage/test_id_generators.py           | 119 +++++++++++++-
 7 files changed, 274 insertions(+), 30 deletions(-)
 create mode 100644 changelog.d/8374.bugfix
 create mode 100644 synapse/storage/databases/main/schema/delta/58/18stream_positions.sql

diff --git a/changelog.d/8374.bugfix b/changelog.d/8374.bugfix
new file mode 100644
index 0000000000..155bc3404f
--- /dev/null
+++ b/changelog.d/8374.bugfix
@@ -0,0 +1 @@
+Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers.
diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py
index d25fa49e1a..d0089fe06c 100644
--- a/synapse/replication/slave/storage/_base.py
+++ b/synapse/replication/slave/storage/_base.py
@@ -31,11 +31,13 @@ class BaseSlavedStore(CacheInvalidationWorkerStore):
             self._cache_id_gen = MultiWriterIdGenerator(
                 db_conn,
                 database,
+                stream_name="caches",
                 instance_name=hs.get_instance_name(),
                 table="cache_invalidation_stream_by_instance",
                 instance_column="instance_name",
                 id_column="stream_id",
                 sequence_name="cache_invalidation_stream_seq",
+                writers=[],
             )  # type: Optional[MultiWriterIdGenerator]
         else:
             self._cache_id_gen = None
diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py
index ccb3384db9..0cb12f4c61 100644
--- a/synapse/storage/databases/main/__init__.py
+++ b/synapse/storage/databases/main/__init__.py
@@ -160,14 +160,20 @@ class DataStore(
         )
 
         if isinstance(self.database_engine, PostgresEngine):
+            # We set the `writers` to an empty list here as we don't care about
+            # missing updates over restarts, as we'll not have anything in our
+            # caches to invalidate. (This reduces the amount of writes to the DB
+            # that happen).
self._cache_id_gen = MultiWriterIdGenerator( db_conn, database, - instance_name="master", + stream_name="caches", + instance_name=hs.get_instance_name(), table="cache_invalidation_stream_by_instance", instance_column="instance_name", id_column="stream_id", sequence_name="cache_invalidation_stream_seq", + writers=[], ) else: self._cache_id_gen = None diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index de9e8d1dc6..f95679ebc4 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -83,21 +83,25 @@ class EventsWorkerStore(SQLBaseStore): self._stream_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + stream_name="events", instance_name=hs.get_instance_name(), table="events", instance_column="instance_name", id_column="stream_ordering", sequence_name="events_stream_seq", + writers=hs.config.worker.writers.events, ) self._backfill_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + stream_name="backfill", instance_name=hs.get_instance_name(), table="events", instance_column="instance_name", id_column="stream_ordering", sequence_name="events_backfill_stream_seq", positive=False, + writers=hs.config.worker.writers.events, ) else: # We shouldn't be running in worker mode with SQLite, but its useful diff --git a/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql b/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql new file mode 100644 index 0000000000..985fd949a2 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql @@ -0,0 +1,22 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE stream_positions ( + stream_name TEXT NOT NULL, + instance_name TEXT NOT NULL, + stream_id BIGINT NOT NULL +); + +CREATE UNIQUE INDEX stream_positions_idx ON stream_positions(stream_name, instance_name); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index b0353ac2dc..727fcc521c 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -22,6 +22,7 @@ from typing import Dict, List, Optional, Set, Union import attr from typing_extensions import Deque +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.util.sequence import PostgresSequenceGenerator @@ -184,12 +185,16 @@ class MultiWriterIdGenerator: Args: db_conn db + stream_name: A name for the stream. instance_name: The name of this instance. table: Database table associated with stream. instance_column: Column that stores the row's writer's instance name id_column: Column that stores the stream ID. sequence_name: The name of the postgres sequence used to generate new IDs. + writers: A list of known writers to use to populate current positions + on startup. 
Can be empty if nothing uses `get_current_token` or + `get_positions` (e.g. caches stream). positive: Whether the IDs are positive (true) or negative (false). When using negative IDs we go backwards from -1 to -2, -3, etc. """ @@ -198,16 +203,20 @@ class MultiWriterIdGenerator: self, db_conn, db: DatabasePool, + stream_name: str, instance_name: str, table: str, instance_column: str, id_column: str, sequence_name: str, + writers: List[str], positive: bool = True, ): self._db = db + self._stream_name = stream_name self._instance_name = instance_name self._positive = positive + self._writers = writers self._return_factor = 1 if positive else -1 # We lock as some functions may be called from DB threads. @@ -216,9 +225,7 @@ class MultiWriterIdGenerator: # Note: If we are a negative stream then we still store all the IDs as # positive to make life easier for us, and simply negate the IDs when we # return them. - self._current_positions = self._load_current_ids( - db_conn, table, instance_column, id_column - ) + self._current_positions = {} # type: Dict[str, int] # Set of local IDs that we're still processing. The current position # should be less than the minimum of this set (if not empty). @@ -251,31 +258,81 @@ class MultiWriterIdGenerator: self._sequence_gen = PostgresSequenceGenerator(sequence_name) + # This goes and fills out the above state from the database. + self._load_current_ids(db_conn, table, instance_column, id_column) + def _load_current_ids( self, db_conn, table: str, instance_column: str, id_column: str - ) -> Dict[str, int]: - # If positive stream aggregate via MAX. For negative stream use MIN - # *and* negate the result to get a positive number. - sql = """ - SELECT %(instance)s, %(agg)s(%(id)s) FROM %(table)s - GROUP BY %(instance)s - """ % { - "instance": instance_column, - "id": id_column, - "table": table, - "agg": "MAX" if self._positive else "-MIN", - } - + ): cur = db_conn.cursor() - cur.execute(sql) - # `cur` is an iterable over returned rows, which are 2-tuples. - current_positions = dict(cur) + # Load the current positions of all writers for the stream. + if self._writers: + sql = """ + SELECT instance_name, stream_id FROM stream_positions + WHERE stream_name = ? + """ + sql = self._db.engine.convert_param_style(sql) + + cur.execute(sql, (self._stream_name,)) + + self._current_positions = { + instance: stream_id * self._return_factor + for instance, stream_id in cur + if instance in self._writers + } + + # We set the `_persisted_upto_position` to be the minimum of all current + # positions. If empty we use the max stream ID from the DB table. + min_stream_id = min(self._current_positions.values(), default=None) + + if min_stream_id is None: + sql = """ + SELECT COALESCE(%(agg)s(%(id)s), 1) FROM %(table)s + """ % { + "id": id_column, + "table": table, + "agg": "MAX" if self._positive else "-MIN", + } + cur.execute(sql) + (stream_id,) = cur.fetchone() + self._persisted_upto_position = stream_id + else: + # If we have a min_stream_id then we pull out everything greater + # than it from the DB so that we can prefill + # `_known_persisted_positions` and get a more accurate + # `_persisted_upto_position`. + # + # We also check if any of the later rows are from this instance, in + # which case we use that for this instance's current position. This + # is to handle the case where we didn't finish persisting to the + # stream positions table before restart (or the stream position + # table otherwise got out of date). 
+
+            sql = """
+                SELECT %(instance)s, %(id)s FROM %(table)s
+                WHERE ? %(cmp)s %(id)s
+            """ % {
+                "id": id_column,
+                "table": table,
+                "instance": instance_column,
+                "cmp": "<=" if self._positive else ">=",
+            }
+            sql = self._db.engine.convert_param_style(sql)
+            cur.execute(sql, (min_stream_id,))
+
+            self._persisted_upto_position = min_stream_id
+
+            with self._lock:
+                for (instance, stream_id,) in cur:
+                    stream_id = self._return_factor * stream_id
+                    self._add_persisted_position(stream_id)
+
+                    if instance == self._instance_name:
+                        self._current_positions[instance] = stream_id
 
         cur.close()
 
-        return current_positions
-
     def _load_next_id_txn(self, txn) -> int:
         return self._sequence_gen.get_next_id_txn(txn)
 
@@ -316,6 +373,21 @@ class MultiWriterIdGenerator:
             txn.call_after(self._mark_id_as_finished, next_id)
             txn.call_on_exception(self._mark_id_as_finished, next_id)
 
+            # Update the `stream_positions` table with newly updated stream
+            # ID (unless self._writers is not set in which case we don't
+            # bother, as nothing will read it).
+            #
+            # We only do this on the success path so that the persisted current
+            # position points to a persisted row with the correct instance name.
+            if self._writers:
+                txn.call_after(
+                    run_as_background_process,
+                    "MultiWriterIdGenerator._update_table",
+                    self._db.runInteraction,
+                    "MultiWriterIdGenerator._update_table",
+                    self._update_stream_positions_table_txn,
+                )
+
             return self._return_factor * next_id
 
     def _mark_id_as_finished(self, next_id: int):
@@ -447,6 +519,28 @@ class MultiWriterIdGenerator:
             # do.
             break
 
+    def _update_stream_positions_table_txn(self, txn):
+        """Update the `stream_positions` table with newly persisted position.
+        """
+
+        if not self._writers:
+            return
+
+        # We upsert the value, ensuring on conflict that we always increase the
+        # value (or decrease if stream goes backwards).
+        sql = """
+            INSERT INTO stream_positions (stream_name, instance_name, stream_id)
+            VALUES (?, ?, ?)
+            ON CONFLICT (stream_name, instance_name)
+            DO UPDATE SET
+                stream_id = %(agg)s(stream_positions.stream_id, EXCLUDED.stream_id)
+        """ % {
+            "agg": "GREATEST" if self._positive else "LEAST",
+        }
+
+        pos = self.get_current_token_for_writer(self._instance_name)
+        txn.execute(sql, (self._stream_name, self._instance_name, pos))
+
 
 @attr.s(slots=True)
 class _AsyncCtxManagerWrapper:
@@ -503,4 +597,16 @@ class _MultiWriterCtxManager:
         if exc_type is not None:
             return False
 
+        # Update the `stream_positions` table with newly updated stream
+        # ID (unless self._writers is not set in which case we don't
+        # bother, as nothing will read it).
+        #
+        # We only do this on the success path so that the persisted current
+        # position points to a persisted row with the correct instance name.
+        if self.id_gen._writers:
+            await self.id_gen._db.runInteraction(
+                "MultiWriterIdGenerator._update_table",
+                self.id_gen._update_stream_positions_table_txn,
+            )
+
         return False
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index fb8f5bc255..d4ff55fbff 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -43,16 +43,20 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
             """
         )
 
-    def _create_id_generator(self, instance_name="master") -> MultiWriterIdGenerator:
+    def _create_id_generator(
+        self, instance_name="master", writers=["master"]
+    ) -> MultiWriterIdGenerator:
         def _create(conn):
             return MultiWriterIdGenerator(
                 conn,
                 self.db_pool,
+                stream_name="test_stream",
                 instance_name=instance_name,
                 table="foobar",
                 instance_column="instance_name",
                 id_column="stream_id",
                 sequence_name="foobar_seq",
+                writers=writers,
             )
 
         return self.get_success(self.db_pool.runWithConnection(_create))
@@ -68,6 +72,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
                 "INSERT INTO foobar VALUES (nextval('foobar_seq'), ?)",
                 (instance_name,),
             )
+            txn.execute(
+                """
+                INSERT INTO stream_positions VALUES ('test_stream', ?, lastval())
+                ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = lastval()
+                """,
+                (instance_name,),
+            )
 
         self.get_success(self.db_pool.runInteraction("_insert_rows", _insert))
 
@@ -81,6 +92,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
                 "INSERT INTO foobar VALUES (?, ?)", (stream_id, instance_name,),
             )
             txn.execute("SELECT setval('foobar_seq', ?)", (stream_id,))
+            txn.execute(
+                """
+                INSERT INTO stream_positions VALUES ('test_stream', ?, ?)
+                ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
+                """,
+                (instance_name, stream_id, stream_id),
+            )
 
         self.get_success(self.db_pool.runInteraction("_insert_row_with_id", _insert))
 
@@ -179,8 +197,8 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self._insert_rows("first", 3)
         self._insert_rows("second", 4)
 
-        first_id_gen = self._create_id_generator("first")
-        second_id_gen = self._create_id_generator("second")
+        first_id_gen = self._create_id_generator("first", writers=["first", "second"])
+        second_id_gen = self._create_id_generator("second", writers=["first", "second"])
 
         self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7})
         self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3)
@@ -262,7 +280,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self._insert_row_with_id("first", 3)
         self._insert_row_with_id("second", 5)
 
-        id_gen = self._create_id_generator("first")
+        id_gen = self._create_id_generator("first", writers=["first", "second"])
 
         self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
 
@@ -300,7 +318,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         self._insert_row_with_id("first", 3)
         self._insert_row_with_id("second", 5)
 
-        id_gen = self._create_id_generator("first")
+        id_gen = self._create_id_generator("first", writers=["first", "second"])
 
         self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5})
 
@@ -319,6 +337,80 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
         # `persisted_upto_position` in this case, then it will be correct in the
         # other cases that are tested above (since they'll hit the same code).
 
+    def test_restart_during_out_of_order_persistence(self):
+        """Test that restarting a process while another process is writing out
+        of order updates is handled correctly.
+ """ + + # Prefill table with 7 rows written by 'master' + self._insert_rows("master", 7) + + id_gen = self._create_id_generator() + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + # Persist two rows at once + ctx1 = self.get_success(id_gen.get_next()) + ctx2 = self.get_success(id_gen.get_next()) + + s1 = self.get_success(ctx1.__aenter__()) + s2 = self.get_success(ctx2.__aenter__()) + + self.assertEqual(s1, 8) + self.assertEqual(s2, 9) + + self.assertEqual(id_gen.get_positions(), {"master": 7}) + self.assertEqual(id_gen.get_current_token_for_writer("master"), 7) + + # We finish persisting the second row before restart + self.get_success(ctx2.__aexit__(None, None, None)) + + # We simulate a restart of another worker by just creating a new ID gen. + id_gen_worker = self._create_id_generator("worker") + + # Restarted worker should not see the second persisted row + self.assertEqual(id_gen_worker.get_positions(), {"master": 7}) + self.assertEqual(id_gen_worker.get_current_token_for_writer("master"), 7) + + # Now if we persist the first row then both instances should jump ahead + # correctly. + self.get_success(ctx1.__aexit__(None, None, None)) + + self.assertEqual(id_gen.get_positions(), {"master": 9}) + id_gen_worker.advance("master", 9) + self.assertEqual(id_gen_worker.get_positions(), {"master": 9}) + + def test_writer_config_change(self): + """Test that changing the writer config correctly works. + """ + + self._insert_row_with_id("first", 3) + self._insert_row_with_id("second", 5) + + # Initial config has two writers + id_gen = self._create_id_generator("first", writers=["first", "second"]) + self.assertEqual(id_gen.get_persisted_upto_position(), 3) + + # New config removes one of the configs. Note that if the writer is + # removed from config we assume that it has been shut down and has + # finished persisting, hence why the persisted upto position is 5. + id_gen_2 = self._create_id_generator("second", writers=["second"]) + self.assertEqual(id_gen_2.get_persisted_upto_position(), 5) + + # This config points to a single, previously unused writer. + id_gen_3 = self._create_id_generator("third", writers=["third"]) + self.assertEqual(id_gen_3.get_persisted_upto_position(), 5) + + # Check that we get a sane next stream ID with this new config. + + async def _get_next_async(): + async with id_gen_3.get_next() as stream_id: + self.assertEqual(stream_id, 6) + + self.get_success(_get_next_async()) + self.assertEqual(id_gen_3.get_persisted_upto_position(), 6) + class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """Tests MultiWriterIdGenerator that produce *negative* stream IDs. @@ -345,16 +437,20 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): """ ) - def _create_id_generator(self, instance_name="master") -> MultiWriterIdGenerator: + def _create_id_generator( + self, instance_name="master", writers=["master"] + ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( conn, self.db_pool, + stream_name="test_stream", instance_name=instance_name, table="foobar", instance_column="instance_name", id_column="stream_id", sequence_name="foobar_seq", + writers=writers, positive=False, ) @@ -368,6 +464,13 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase): txn.execute( "INSERT INTO foobar VALUES (?, ?)", (stream_id, instance_name,), ) + txn.execute( + """ + INSERT INTO stream_positions VALUES ('test_stream', ?, ?) 
+                    ON CONFLICT (stream_name, instance_name) DO UPDATE SET stream_id = ?
+                """,
+                (instance_name, -stream_id, -stream_id),
+            )
 
         self.get_success(self.db_pool.runInteraction("_insert_row", _insert))
 
@@ -409,8 +512,8 @@ class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
         """Tests that having multiple instances that get advanced over
        federation works correctly.
        """
-        id_gen_1 = self._create_id_generator("first")
-        id_gen_2 = self._create_id_generator("second")
+        id_gen_1 = self._create_id_generator("first", writers=["first", "second"])
+        id_gen_2 = self._create_id_generator("second", writers=["first", "second"])
 
         async def _get_next_async():
             async with id_gen_1.get_next() as stream_id:

From 3e87d79e1c6ef894387ee2f24e008dfb8f5f853f Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 25 Sep 2020 09:58:32 +0100
Subject: [PATCH 048/134] Fix schema delta for servers that have not
 backfilled (#8396)

Fixes #8395.
---
 changelog.d/8396.feature                                 | 1 +
 .../schema/delta/58/14events_instance_name.sql.postgres  | 4 +++-
 synapse/storage/util/id_generators.py                    | 6 +++++-
 3 files changed, 9 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/8396.feature

diff --git a/changelog.d/8396.feature b/changelog.d/8396.feature
new file mode 100644
index 0000000000..b363e929ea
--- /dev/null
+++ b/changelog.d/8396.feature
@@ -0,0 +1 @@
+Add experimental support for sharding event persister.
diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres
index 97c1e6a0c5..c31f9af82a 100644
--- a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres
+++ b/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres
@@ -21,6 +21,8 @@ SELECT setval('events_stream_seq', (
 
 CREATE SEQUENCE IF NOT EXISTS events_backfill_stream_seq;
 
+-- If the server has never backfilled a room then doing `-MIN(...)` will give
+-- a negative result, hence why we do `GREATEST(...)`
 SELECT setval('events_backfill_stream_seq', (
-    SELECT COALESCE(-MIN(stream_ordering), 1) FROM events
+    SELECT GREATEST(COALESCE(-MIN(stream_ordering), 1), 1) FROM events
 ));
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py
index 727fcc521c..4269eaf918 100644
--- a/synapse/storage/util/id_generators.py
+++ b/synapse/storage/util/id_generators.py
@@ -287,8 +287,12 @@ class MultiWriterIdGenerator:
         min_stream_id = min(self._current_positions.values(), default=None)
 
         if min_stream_id is None:
+            # We add a GREATEST here to ensure that the result is always
+            # positive. (This can be a problem for e.g. backfill streams where
+            # the server has never backfilled).
             sql = """
-                SELECT COALESCE(%(agg)s(%(id)s), 1) FROM %(table)s
+                SELECT GREATEST(COALESCE(%(agg)s(%(id)s), 1), 1)
+                FROM %(table)s
             """ % {
                 "id": id_column,
                 "table": table,

From abd04b6af0671517a01781c8bd10fef2a6c32cc4 Mon Sep 17 00:00:00 2001
From: Tdxdxoz
Date: Fri, 25 Sep 2020 19:01:45 +0800
Subject: [PATCH 049/134] Allow existing users to log in via OpenID Connect.
 (#8345)

Co-authored-by: Benjamin Koch

This adds configuration flags that will match a user to pre-existing users
when logging in via OpenID Connect. This is useful when switching to an
existing SSO system.
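
For illustration only, a minimal sketch of how the new flag would sit in an
existing `oidc_config` block in homeserver.yaml. The provider values below
are placeholders for an already-working OIDC setup; only
`allow_existing_users` is introduced by this change:

    oidc_config:
      enabled: true
      issuer: "https://id.example.com/"   # placeholder provider
      client_id: "synapse"                # placeholder
      client_secret: "secret"             # placeholder
      # New flag: match an OIDC login to a pre-existing account instead of
      # failing with "mxid '...' is already taken".
      allow_existing_users: true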
--- changelog.d/8345.feature | 1 + docs/sample_config.yaml | 5 +++ synapse/config/oidc_config.py | 6 +++ synapse/handlers/oidc_handler.py | 42 ++++++++++++------- .../storage/databases/main/registration.py | 4 +- tests/handlers/test_oidc.py | 35 ++++++++++++++++ 6 files changed, 76 insertions(+), 17 deletions(-) create mode 100644 changelog.d/8345.feature diff --git a/changelog.d/8345.feature b/changelog.d/8345.feature new file mode 100644 index 0000000000..4ee5b6a56e --- /dev/null +++ b/changelog.d/8345.feature @@ -0,0 +1 @@ +Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index fb04ff283d..845f537795 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1689,6 +1689,11 @@ oidc_config: # #skip_verification: true + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead + # of failing. This could be used if switching from password logins to OIDC. Defaults to false. + # + #allow_existing_users: true + # An external module can be provided here as a custom solution to mapping # attributes returned from a OIDC provider onto a matrix user. # diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index e0939bce84..70fc8a2f62 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -56,6 +56,7 @@ class OIDCConfig(Config): self.oidc_userinfo_endpoint = oidc_config.get("userinfo_endpoint") self.oidc_jwks_uri = oidc_config.get("jwks_uri") self.oidc_skip_verification = oidc_config.get("skip_verification", False) + self.oidc_allow_existing_users = oidc_config.get("allow_existing_users", False) ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) @@ -158,6 +159,11 @@ class OIDCConfig(Config): # #skip_verification: true + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead + # of failing. This could be used if switching from password logins to OIDC. Defaults to false. + # + #allow_existing_users: true + # An external module can be provided here as a custom solution to mapping # attributes returned from a OIDC provider onto a matrix user. # diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 4230dbaf99..0e06e4408d 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -114,6 +114,7 @@ class OidcHandler: hs.config.oidc_user_mapping_provider_config ) # type: OidcMappingProvider self._skip_verification = hs.config.oidc_skip_verification # type: bool + self._allow_existing_users = hs.config.oidc_allow_existing_users # type: bool self._http_client = hs.get_proxied_http_client() self._auth_handler = hs.get_auth_handler() @@ -849,7 +850,8 @@ class OidcHandler: If we don't find the user that way, we should register the user, mapping the localpart and the display name from the UserInfo. - If a user already exists with the mxid we've mapped, raise an exception. + If a user already exists with the mxid we've mapped and allow_existing_users + is disabled, raise an exception. 
Args: userinfo: an object representing the user @@ -905,21 +907,31 @@ class OidcHandler: localpart = map_username_to_mxid_localpart(attributes["localpart"]) - user_id = UserID(localpart, self._hostname) - if await self._datastore.get_users_by_id_case_insensitive(user_id.to_string()): - # This mxid is taken - raise MappingException( - "mxid '{}' is already taken".format(user_id.to_string()) + user_id = UserID(localpart, self._hostname).to_string() + users = await self._datastore.get_users_by_id_case_insensitive(user_id) + if users: + if self._allow_existing_users: + if len(users) == 1: + registered_user_id = next(iter(users)) + elif user_id in users: + registered_user_id = user_id + else: + raise MappingException( + "Attempted to login as '{}' but it matches more than one user inexactly: {}".format( + user_id, list(users.keys()) + ) + ) + else: + # This mxid is taken + raise MappingException("mxid '{}' is already taken".format(user_id)) + else: + # It's the first time this user is logging in and the mapped mxid was + # not taken, register the user + registered_user_id = await self._registration_handler.register_user( + localpart=localpart, + default_display_name=attributes["display_name"], + user_agent_ips=(user_agent, ip_address), ) - - # It's the first time this user is logging in and the mapped mxid was - # not taken, register the user - registered_user_id = await self._registration_handler.register_user( - localpart=localpart, - default_display_name=attributes["display_name"], - user_agent_ips=(user_agent, ip_address), - ) - await self._datastore.record_user_external_id( self._auth_provider_id, remote_user_id, registered_user_id, ) diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 33825e8949..48ce7ecd16 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -393,7 +393,7 @@ class RegistrationWorkerStore(SQLBaseStore): async def get_user_by_external_id( self, auth_provider: str, external_id: str - ) -> str: + ) -> Optional[str]: """Look up a user by their external auth id Args: @@ -401,7 +401,7 @@ class RegistrationWorkerStore(SQLBaseStore): external_id: id on that system Returns: - str|None: the mxid of the user, or None if they are not known + the mxid of the user, or None if they are not known """ return await self.db_pool.simple_select_one_onecol( table="user_external_ids", diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 89ec5fcb31..5910772aa8 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -617,3 +617,38 @@ class OidcHandlerTestCase(HomeserverTestCase): ) ) self.assertEqual(mxid, "@test_user_2:test") + + # Test if the mxid is already taken + store = self.hs.get_datastore() + user3 = UserID.from_string("@test_user_3:test") + self.get_success( + store.register_user(user_id=user3.to_string(), password_hash=None) + ) + userinfo = {"sub": "test3", "username": "test_user_3"} + e = self.get_failure( + self.handler._map_userinfo_to_user( + userinfo, token, "user-agent", "10.10.10.10" + ), + MappingException, + ) + self.assertEqual(str(e.value), "mxid '@test_user_3:test' is already taken") + + @override_config({"oidc_config": {"allow_existing_users": True}}) + def test_map_userinfo_to_existing_user(self): + """Existing users can log in with OpenID Connect when allow_existing_users is True.""" + store = self.hs.get_datastore() + user4 = UserID.from_string("@test_user_4:test") + self.get_success( + 
store.register_user(user_id=user4.to_string(), password_hash=None)
+        )
+        userinfo = {
+            "sub": "test4",
+            "username": "test_user_4",
+        }
+        token = {}
+        mxid = self.get_success(
+            self.handler._map_userinfo_to_user(
+                userinfo, token, "user-agent", "10.10.10.10"
+            )
+        )
+        self.assertEqual(mxid, "@test_user_4:test")

From fec6f9ac178867a8e7c5410e0d25898f29bab35c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 25 Sep 2020 12:29:54 +0100
Subject: [PATCH 050/134] Fix occasional "Re-starting finished log context"
 from keyring (#8398)

* Fix test_verify_json_objects_for_server_awaits_previous_requests

It turns out that this wasn't really testing what it thought it was testing
(in particular, `check_context` was turning failures into success, which was
making the tests pass even though it wasn't clear they should have been).
It was also somewhat overcomplex - we can test what it was trying to test
without mocking out perspectives servers.

* Fix warnings about finished logcontexts in the keyring

We need to make sure that we finish the key fetching magic before we run the
verifying code, to ensure that we don't mess up our logcontexts.
---
 changelog.d/8398.bugfix      |   1 +
 synapse/crypto/keyring.py    |  70 ++++++++++++--------
 tests/crypto/test_keyring.py | 120 ++++++++++++++++-------------------
 3 files changed, 101 insertions(+), 90 deletions(-)
 create mode 100644 changelog.d/8398.bugfix

diff --git a/changelog.d/8398.bugfix b/changelog.d/8398.bugfix
new file mode 100644
index 0000000000..e432aeebf1
--- /dev/null
+++ b/changelog.d/8398.bugfix
@@ -0,0 +1 @@
+Fix "Re-starting finished log context" warning when receiving an event we already had over federation.

diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 42e4087a92..c04ad77cf9 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -42,7 +42,6 @@ from synapse.api.errors import (
 )
 from synapse.logging.context import (
     PreserveLoggingContext,
-    current_context,
     make_deferred_yieldable,
     preserve_fn,
     run_in_background,
@@ -233,8 +232,6 @@ class Keyring:
         """
         try:
-            ctx = current_context()
-
             # map from server name to a set of outstanding request ids
             server_to_request_ids = {}

@@ -265,12 +262,8 @@ class Keyring:
             # if there are no more requests for this server, we can drop the lock.
             if not server_requests:
-                with PreserveLoggingContext(ctx):
-                    logger.debug("Releasing key lookup lock on %s", server_name)
-
-                    # ... but not immediately, as that can cause stack explosions if
-                    # we get a long queue of lookups.
-                    self.clock.call_later(0, drop_server_lock, server_name)
+                logger.debug("Releasing key lookup lock on %s", server_name)
+                drop_server_lock(server_name)

             return res

@@ -335,20 +328,32 @@ class Keyring:
                 )

             # look for any requests which weren't satisfied
-            with PreserveLoggingContext():
-                for verify_request in remaining_requests:
-                    verify_request.key_ready.errback(
-                        SynapseError(
-                            401,
-                            "No key for %s with ids in %s (min_validity %i)"
-                            % (
-                                verify_request.server_name,
-                                verify_request.key_ids,
-                                verify_request.minimum_valid_until_ts,
-                            ),
-                            Codes.UNAUTHORIZED,
-                        )
+            while remaining_requests:
+                verify_request = remaining_requests.pop()
+                rq_str = (
+                    "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
+                    % (
+                        verify_request.server_name,
+                        verify_request.key_ids,
+                        verify_request.minimum_valid_until_ts,
                     )
+                )
+
+                # If we run the errback immediately, it may cancel our
+                # loggingcontext while we are still in it, so instead we
+                # schedule it for the next time round the reactor.
+                #
+                # (this also ensures that we don't get a stack overflow if we
+                # had a massive queue of lookups waiting for this server).
+                self.clock.call_later(
+                    0,
+                    verify_request.key_ready.errback,
+                    SynapseError(
+                        401,
+                        "Failed to find any key to satisfy %s" % (rq_str,),
+                        Codes.UNAUTHORIZED,
+                    ),
+                )
         except Exception as err:
             # we don't really expect to get here, because any errors should already
             # have been caught and logged. But if we do, let's log the error and make
@@ -410,10 +415,23 @@ class Keyring:
                     # key was not valid at this point
                     continue

-                with PreserveLoggingContext():
-                    verify_request.key_ready.callback(
-                        (server_name, key_id, fetch_key_result.verify_key)
-                    )
+                # we have a valid key for this request. If we run the callback
+                # immediately, it may cancel our loggingcontext while we are still in
+                # it, so instead we schedule it for the next time round the reactor.
+                #
+                # (this also ensures that we don't get a stack overflow if we had
+                # a massive queue of lookups waiting for this server).
+ logger.debug( + "Found key %s:%s for %s", + server_name, + key_id, + verify_request.request_name, + ) + self.clock.call_later( + 0, + verify_request.key_ready.callback, + (server_name, key_id, fetch_key_result.verify_key), + ) completed.append(verify_request) break diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2e6e7abf1f..5cf408f21f 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -23,6 +23,7 @@ from nacl.signing import SigningKey from signedjson.key import encode_verify_key_base64, get_verify_key from twisted.internet import defer +from twisted.internet.defer import Deferred, ensureDeferred from synapse.api.errors import SynapseError from synapse.crypto import keyring @@ -33,7 +34,6 @@ from synapse.crypto.keyring import ( ) from synapse.logging.context import ( LoggingContext, - PreserveLoggingContext, current_context, make_deferred_yieldable, ) @@ -68,54 +68,40 @@ class MockPerspectiveServer: class KeyringTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): - self.mock_perspective_server = MockPerspectiveServer() - self.http_client = Mock() - - config = self.default_config() - config["trusted_key_servers"] = [ - { - "server_name": self.mock_perspective_server.server_name, - "verify_keys": self.mock_perspective_server.get_verify_keys(), - } - ] - - return self.setup_test_homeserver( - handlers=None, http_client=self.http_client, config=config - ) - - def check_context(self, _, expected): + def check_context(self, val, expected): self.assertEquals(getattr(current_context(), "request", None), expected) + return val def test_verify_json_objects_for_server_awaits_previous_requests(self): - key1 = signedjson.key.generate_signing_key(1) + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock() + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,)) - kr = keyring.Keyring(self.hs) + # a signed object that we are going to try to validate + key1 = signedjson.key.generate_signing_key(1) json1 = {} signedjson.sign.sign_json(json1, "server10", key1) - persp_resp = { - "server_keys": [ - self.mock_perspective_server.get_signed_key( - "server10", signedjson.key.get_verify_key(key1) - ) - ] - } - persp_deferred = defer.Deferred() + # start off a first set of lookups. We make the mock fetcher block until this + # deferred completes. 
+ first_lookup_deferred = Deferred() - async def get_perspectives(**kwargs): - self.assertEquals(current_context().request, "11") - with PreserveLoggingContext(): - await persp_deferred - return persp_resp + async def first_lookup_fetch(keys_to_fetch): + self.assertEquals(current_context().request, "context_11") + self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) - self.http_client.post_json.side_effect = get_perspectives + await make_deferred_yieldable(first_lookup_deferred) + return { + "server10": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) + } + } - # start off a first set of lookups - @defer.inlineCallbacks - def first_lookup(): - with LoggingContext("11") as context_11: - context_11.request = "11" + mock_fetcher.get_keys.side_effect = first_lookup_fetch + + async def first_lookup(): + with LoggingContext("context_11") as context_11: + context_11.request = "context_11" res_deferreds = kr.verify_json_objects_for_server( [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] @@ -124,7 +110,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # the unsigned json should be rejected pretty quickly self.assertTrue(res_deferreds[1].called) try: - yield res_deferreds[1] + await res_deferreds[1] self.assertFalse("unsigned json didn't cause a failure") except SynapseError: pass @@ -132,45 +118,51 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertFalse(res_deferreds[0].called) res_deferreds[0].addBoth(self.check_context, None) - yield make_deferred_yieldable(res_deferreds[0]) + await make_deferred_yieldable(res_deferreds[0]) - # let verify_json_objects_for_server finish its work before we kill the - # logcontext - yield self.clock.sleep(0) + d0 = ensureDeferred(first_lookup()) - d0 = first_lookup() - - # wait a tick for it to send the request to the perspectives server - # (it first tries the datastore) - self.pump() - self.http_client.post_json.assert_called_once() + mock_fetcher.get_keys.assert_called_once() # a second request for a server with outstanding requests # should block rather than start a second call - @defer.inlineCallbacks - def second_lookup(): - with LoggingContext("12") as context_12: - context_12.request = "12" - self.http_client.post_json.reset_mock() - self.http_client.post_json.return_value = defer.Deferred() + + async def second_lookup_fetch(keys_to_fetch): + self.assertEquals(current_context().request, "context_12") + return { + "server10": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) + } + } + + mock_fetcher.get_keys.reset_mock() + mock_fetcher.get_keys.side_effect = second_lookup_fetch + second_lookup_state = [0] + + async def second_lookup(): + with LoggingContext("context_12") as context_12: + context_12.request = "context_12" res_deferreds_2 = kr.verify_json_objects_for_server( [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) - yield make_deferred_yieldable(res_deferreds_2[0]) + second_lookup_state[0] = 1 + await make_deferred_yieldable(res_deferreds_2[0]) + second_lookup_state[0] = 2 - # let verify_json_objects_for_server finish its work before we kill the - # logcontext - yield self.clock.sleep(0) - - d2 = second_lookup() + d2 = ensureDeferred(second_lookup()) self.pump() - self.http_client.post_json.assert_not_called() + # the second request should be pending, but the fetcher should not yet have been + # called + self.assertEqual(second_lookup_state[0], 1) + mock_fetcher.get_keys.assert_not_called() # complete the first request - 
persp_deferred.callback(persp_resp) + first_lookup_deferred.callback(None) + + # and now both verifications should succeed. self.get_success(d0) self.get_success(d2) From 31acc5c30938bd532670d45304f6750de6e6e759 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 25 Sep 2020 11:05:54 -0400 Subject: [PATCH 051/134] Escape the error description on the sso_error template. (#8405) --- changelog.d/8405.feature | 1 + synapse/res/templates/sso_error.html | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8405.feature diff --git a/changelog.d/8405.feature b/changelog.d/8405.feature new file mode 100644 index 0000000000..f3c4a74bc7 --- /dev/null +++ b/changelog.d/8405.feature @@ -0,0 +1 @@ +Consolidate the SSO error template across all configuration. diff --git a/synapse/res/templates/sso_error.html b/synapse/res/templates/sso_error.html index af8459719a..944bc9c9ca 100644 --- a/synapse/res/templates/sso_error.html +++ b/synapse/res/templates/sso_error.html @@ -12,7 +12,7 @@

There was an error during authentication:

-            {{ error_description }}
+            {{ error_description | e }}

If you are seeing this page after clicking a link sent to you via email, make sure you only click the confirmation link once, and that you open the From 4b3a1faa08f5ad16e0e00dc629fb25be520575d7 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Mon, 28 Sep 2020 00:23:35 +0100 Subject: [PATCH 052/134] typo --- synapse/storage/databases/main/schema/delta/56/event_labels.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels.sql b/synapse/storage/databases/main/schema/delta/56/event_labels.sql index 5e29c1da19..ccf287971c 100644 --- a/synapse/storage/databases/main/schema/delta/56/event_labels.sql +++ b/synapse/storage/databases/main/schema/delta/56/event_labels.sql @@ -13,7 +13,7 @@ * limitations under the License. */ --- room_id and topoligical_ordering are denormalised from the events table in order to +-- room_id and topological_ordering are denormalised from the events table in order to -- make the index work. CREATE TABLE IF NOT EXISTS event_labels ( event_id TEXT, From 450ec4844599b6f06ff6c699a8edc067fa7d4217 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 28 Sep 2020 13:15:00 +0100 Subject: [PATCH 053/134] A pair of tiny cleanups in the federation request code. (#8401) --- changelog.d/8401.misc | 1 + synapse/handlers/federation.py | 2 +- synapse/http/matrixfederationclient.py | 2 -- 3 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8401.misc diff --git a/changelog.d/8401.misc b/changelog.d/8401.misc new file mode 100644 index 0000000000..27fd7ab129 --- /dev/null +++ b/changelog.d/8401.misc @@ -0,0 +1 @@ +A pair of tiny cleanups in the federation request code. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9f773aefa7..5bcfb231b2 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -281,7 +281,7 @@ class FederationHandler(BaseHandler): raise Exception( "Error fetching missing prev_events for %s: %s" % (event_id, e) - ) + ) from e # Update the set of things we've seen after trying to # fetch the missing stuff diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 3c86cbc546..b02c74ab2d 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -473,8 +473,6 @@ class MatrixFederationHttpClient: ) response = await request_deferred - except TimeoutError as e: - raise RequestSendFailed(e, can_retry=True) from e except DNSLookupError as e: raise RequestSendFailed(e, can_retry=retry_on_dns_fail) from e except Exception as e: From bd715e12786f4e48d7a8a1973119bbc0502ecff3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?= Date: Mon, 28 Sep 2020 15:35:02 +0100 Subject: [PATCH 054/134] Add `ui_auth_sessions_ips` table to `synapse_port_db` ignore list (#8410) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This table was created in #8034 (1.20.0). It references `ui_auth_sessions`, which is ignored, so this one should be too. 
Signed-off-by: Dagfinn Ilmari Mannsåker
---
 changelog.d/8410.bugfix | 1 +
 scripts/synapse_port_db | 1 +
 2 files changed, 2 insertions(+)
 create mode 100644 changelog.d/8410.bugfix

diff --git a/changelog.d/8410.bugfix b/changelog.d/8410.bugfix
new file mode 100644
index 0000000000..1323ddc525
--- /dev/null
+++ b/changelog.d/8410.bugfix
@@ -0,0 +1 @@
+Fix a v1.20.0 regression in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table.

diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 684a518b8e..ae2887b7d2 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -145,6 +145,7 @@ IGNORED_TABLES = {
     # the sessions are transient anyway, so ignore them.
     "ui_auth_sessions",
     "ui_auth_sessions_credentials",
+    "ui_auth_sessions_ips",
 }

From 5e3ca12b158b4abefe2e3a54259ab5255dca93d8 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 28 Sep 2020 17:58:33 +0100
Subject: [PATCH 055/134] Create a mechanism for marking tests "logcontext
 clean" (#8399)

---
 changelog.d/8399.misc        |  1 +
 synapse/logging/context.py   | 43 +++++++++++++++++++-----------------
 tests/crypto/test_keyring.py |  3 +++
 tests/unittest.py            | 15 ++++++++++++-
 4 files changed, 41 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/8399.misc

diff --git a/changelog.d/8399.misc b/changelog.d/8399.misc
new file mode 100644
index 0000000000..ce6e8123cf
--- /dev/null
+++ b/changelog.d/8399.misc
@@ -0,0 +1 @@
+Create a mechanism for marking tests "logcontext clean".

diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 2e282d9d67..ca0c774cc5 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -65,6 +65,11 @@ except Exception:
         return None


+# a hook which can be set during testing to assert that we aren't abusing logcontexts.
+def logcontext_error(msg: str):
+    logger.warning(msg)
+
+
 # get an id for the current thread.
 #
 # threading.get_ident doesn't actually return an OS-level tid, and annoyingly,
@@ -330,10 +335,9 @@ class LoggingContext:
         """Enters this logging context into thread local storage"""
         old_context = set_current_context(self)
         if self.previous_context != old_context:
-            logger.warning(
-                "Expected previous context %r, found %r",
-                self.previous_context,
-                old_context,
+            logcontext_error(
+                "Expected previous context %r, found %r"
+                % (self.previous_context, old_context,)
             )
         return self

@@ -346,10 +350,10 @@ class LoggingContext:
         current = set_current_context(self.previous_context)
         if current is not self:
             if current is SENTINEL_CONTEXT:
-                logger.warning("Expected logging context %s was lost", self)
+                logcontext_error("Expected logging context %s was lost" % (self,))
             else:
-                logger.warning(
-                    "Expected logging context %s but found %s", self, current
+                logcontext_error(
+                    "Expected logging context %s but found %s" % (self, current)
                 )

         # the fact that we are here suggests that the caller thinks that everything
@@ -387,16 +391,16 @@ class LoggingContext:
            support getrusuage.
""" if get_thread_id() != self.main_thread: - logger.warning("Started logcontext %s on different thread", self) + logcontext_error("Started logcontext %s on different thread" % (self,)) return if self.finished: - logger.warning("Re-starting finished log context %s", self) + logcontext_error("Re-starting finished log context %s" % (self,)) # If we haven't already started record the thread resource usage so # far if self.usage_start: - logger.warning("Re-starting already-active log context %s", self) + logcontext_error("Re-starting already-active log context %s" % (self,)) else: self.usage_start = rusage @@ -414,7 +418,7 @@ class LoggingContext: try: if get_thread_id() != self.main_thread: - logger.warning("Stopped logcontext %s on different thread", self) + logcontext_error("Stopped logcontext %s on different thread" % (self,)) return if not rusage: @@ -422,9 +426,9 @@ class LoggingContext: # Record the cpu used since we started if not self.usage_start: - logger.warning( - "Called stop on logcontext %s without recording a start rusage", - self, + logcontext_error( + "Called stop on logcontext %s without recording a start rusage" + % (self,) ) return @@ -584,14 +588,13 @@ class PreserveLoggingContext: if context != self._new_context: if not context: - logger.warning( - "Expected logging context %s was lost", self._new_context + logcontext_error( + "Expected logging context %s was lost" % (self._new_context,) ) else: - logger.warning( - "Expected logging context %s but found %s", - self._new_context, - context, + logcontext_error( + "Expected logging context %s but found %s" + % (self._new_context, context,) ) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 5cf408f21f..8ff1460c0d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -41,6 +41,7 @@ from synapse.storage.keys import FetchKeyResult from tests import unittest from tests.test_utils import make_awaitable +from tests.unittest import logcontext_clean class MockPerspectiveServer: @@ -67,6 +68,7 @@ class MockPerspectiveServer: signedjson.sign.sign_json(res, self.server_name, self.key) +@logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def check_context(self, val, expected): self.assertEquals(getattr(current_context(), "request", None), expected) @@ -309,6 +311,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): mock_fetcher2.get_keys.assert_called_once() +@logcontext_clean class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.http_client = Mock() diff --git a/tests/unittest.py b/tests/unittest.py index dabf69cff4..bbe50c3851 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -23,7 +23,7 @@ import logging import time from typing import Optional, Tuple, Type, TypeVar, Union -from mock import Mock +from mock import Mock, patch from canonicaljson import json @@ -169,6 +169,19 @@ def INFO(target): return target +def logcontext_clean(target): + """A decorator which marks the TestCase or method as 'logcontext_clean' + + ... ie, any logcontext errors should cause a test failure + """ + + def logcontext_error(msg): + raise AssertionError("logcontext error: %s" % (msg)) + + patcher = patch("synapse.logging.context.logcontext_error", new=logcontext_error) + return patcher(target) + + class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. 
From bd380d942fdf91cf1214d6859f2bc97d12a92ab4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 28 Sep 2020 18:00:30 +0100 Subject: [PATCH 056/134] Add checks for postgres sequence consistency (#8402) --- changelog.d/8402.misc | 1 + docs/postgres.md | 11 +++ .../storage/databases/main/registration.py | 3 + synapse/storage/databases/state/store.py | 3 + synapse/storage/util/id_generators.py | 5 ++ synapse/storage/util/sequence.py | 90 ++++++++++++++++++- tests/storage/test_id_generators.py | 22 ++++- tests/unittest.py | 31 ++++++- 8 files changed, 160 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8402.misc diff --git a/changelog.d/8402.misc b/changelog.d/8402.misc new file mode 100644 index 0000000000..ad1804d207 --- /dev/null +++ b/changelog.d/8402.misc @@ -0,0 +1 @@ +Add checks on startup that PostgreSQL sequences are consistent with their associated tables. diff --git a/docs/postgres.md b/docs/postgres.md index e71a1975d8..c30cc1fd8c 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -106,6 +106,17 @@ Note that the above may fail with an error about duplicate rows if corruption has already occurred, and such duplicate rows will need to be manually removed. +## Fixing inconsistent sequences error + +Synapse uses Postgres sequences to generate IDs for various tables. A sequence +and associated table can get out of sync if, for example, Synapse has been +downgraded and then upgraded again. + +To fix the issue shut down Synapse (including any and all workers) and run the +SQL command included in the error message. Once done Synapse should start +successfully. + + ## Tuning Postgres The default settings should be fine for most deployments. For larger diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 48ce7ecd16..a83df7759d 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -41,6 +41,9 @@ class RegistrationWorkerStore(SQLBaseStore): self.config = hs.config self.clock = hs.get_clock() + # Note: we don't check this sequence for consistency as we'd have to + # call `find_max_generated_user_id_localpart` each time, which is + # expensive if there are many entries. self._user_id_seq = build_sequence_generator( database.engine, find_max_generated_user_id_localpart, "user_id_seq", ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index bec3780a32..989f0cbc9d 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -99,6 +99,9 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): self._state_group_seq_gen = build_sequence_generator( self.database_engine, get_max_state_group_txn, "state_group_id_seq" ) + self._state_group_seq_gen.check_consistency( + db_conn, table="state_groups", id_column="id" + ) @cached(max_entries=10000, iterable=True) async def get_state_group_delta(self, state_group): diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 4269eaf918..4fd7573e26 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -258,6 +258,11 @@ class MultiWriterIdGenerator: self._sequence_gen = PostgresSequenceGenerator(sequence_name) + # We check that the table and sequence haven't diverged. + self._sequence_gen.check_consistency( + db_conn, table=table, id_column=id_column, positive=positive + ) + # This goes and fills out the above state from the database. 
        self._load_current_ids(db_conn, table, instance_column, id_column)

diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py
index ffc1894748..2dd95e2709 100644
--- a/synapse/storage/util/sequence.py
+++ b/synapse/storage/util/sequence.py
@@ -13,11 +13,34 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import abc
+import logging
 import threading
 from typing import Callable, List, Optional

-from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
-from synapse.storage.types import Cursor
+from synapse.storage.engines import (
+    BaseDatabaseEngine,
+    IncorrectDatabaseSetup,
+    PostgresEngine,
+)
+from synapse.storage.types import Connection, Cursor
+
+logger = logging.getLogger(__name__)
+
+
+_INCONSISTENT_SEQUENCE_ERROR = """
+Postgres sequence '%(seq)s' is inconsistent with associated
+table '%(table)s'. This can happen if Synapse has been downgraded and
+then upgraded again, or due to a bad migration.
+
+To fix this error, shut down Synapse (including any and all workers)
+and run the following SQL:
+
+    SELECT setval('%(seq)s', (
+        %(max_id_sql)s
+    ));
+
+See docs/postgres.md for more information.
+"""


 class SequenceGenerator(metaclass=abc.ABCMeta):
@@ -28,6 +51,19 @@ class SequenceGenerator(metaclass=abc.ABCMeta):
         """Gets the next ID in the sequence"""
         ...

+    @abc.abstractmethod
+    def check_consistency(
+        self, db_conn: Connection, table: str, id_column: str, positive: bool = True
+    ):
+        """Should be called during start up to test that the current value of
+        the sequence is greater than or equal to the maximum ID in the table.
+
+        This is to handle various cases where the sequence value can get out
+        of sync with the table, e.g. if Synapse gets rolled back to a previous
+        version and then rolled forwards again.
+        """
+        ...
+

 class PostgresSequenceGenerator(SequenceGenerator):
     """An implementation of SequenceGenerator which uses a postgres sequence"""
@@ -45,6 +81,50 @@ class PostgresSequenceGenerator(SequenceGenerator):
         )
         return [i for (i,) in txn]

+    def check_consistency(
+        self, db_conn: Connection, table: str, id_column: str, positive: bool = True
+    ):
+        txn = db_conn.cursor()
+
+        # First we get the current max ID from the table.
+        table_sql = "SELECT GREATEST(%(agg)s(%(id)s), 0) FROM %(table)s" % {
+            "id": id_column,
+            "table": table,
+            "agg": "MAX" if positive else "-MIN",
+        }
+
+        txn.execute(table_sql)
+        row = txn.fetchone()
+        if not row:
+            # Table is empty, so nothing to do.
+            txn.close()
+            return
+
+        # Now we fetch the current value from the sequence and compare with the
+        # above.
+        max_stream_id = row[0]
+        txn.execute(
+            "SELECT last_value, is_called FROM %(seq)s" % {"seq": self._sequence_name}
+        )
+        last_value, is_called = txn.fetchone()
+        txn.close()
+
+        # If `is_called` is False then `last_value` is actually the value that
+        # will be generated next, so we decrement to get the true "last value".
+        if not is_called:
+            last_value -= 1
+
+        if max_stream_id > last_value:
+            logger.warning(
+                "Postgres sequence %s is behind table %s: %d < %d",
+                self._sequence_name,
+                table,
+                last_value,
+                max_stream_id,
+            )
+            raise IncorrectDatabaseSetup(
+                _INCONSISTENT_SEQUENCE_ERROR
+                % {"seq": self._sequence_name, "table": table, "max_id_sql": table_sql}
+            )
+

 GetFirstCallbackType = Callable[[Cursor], int]

@@ -81,6 +161,12 @@ class LocalSequenceGenerator(SequenceGenerator):
             self._current_max_id += 1
             return self._current_max_id

+    def check_consistency(
+        self, db_conn: Connection, table: str, id_column: str, positive: bool = True
+    ):
+        # There is nothing to do for in memory sequences
+        pass
+

 def build_sequence_generator(
     database_engine: BaseDatabaseEngine,

diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index d4ff55fbff..4558bee7be 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -12,9 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
 from synapse.storage.database import DatabasePool
+from synapse.storage.engines import IncorrectDatabaseSetup
 from synapse.storage.util.id_generators import MultiWriterIdGenerator

 from tests.unittest import HomeserverTestCase
@@ -59,7 +58,7 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
                 writers=writers,
             )

-        return self.get_success(self.db_pool.runWithConnection(_create))
+        return self.get_success_or_raise(self.db_pool.runWithConnection(_create))

     def _insert_rows(self, instance_name: str, number: int):
         """Insert N rows as the given instance, inserting with stream IDs pulled
@@ -411,6 +410,23 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
             self.get_success(_get_next_async())
         self.assertEqual(id_gen_3.get_persisted_upto_position(), 6)

+    def test_sequence_consistency(self):
+        """Test that we error out if the table and sequence diverge.
+        """
+
+        # Prefill with some rows
+        self._insert_row_with_id("master", 3)
+
+        # Now we add a row *without* updating the stream ID
+        def _insert(txn):
+            txn.execute("INSERT INTO foobar VALUES (26, 'master')")
+
+        self.get_success(self.db_pool.runInteraction("_insert", _insert))
+
+        # Creating the ID gen should error
+        with self.assertRaises(IncorrectDatabaseSetup):
+            self._create_id_generator("first")
+

 class BackwardsMultiWriterIdGeneratorTestCase(HomeserverTestCase):
     """Tests MultiWriterIdGenerator that produce *negative* stream IDs.

diff --git a/tests/unittest.py b/tests/unittest.py
index bbe50c3851..e654c0442d 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -14,7 +14,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 import gc
 import hashlib
 import hmac
@@ -28,6 +27,7 @@ from mock import Mock, patch
 from canonicaljson import json

 from twisted.internet.defer import Deferred, ensureDeferred, succeed
+from twisted.python.failure import Failure
 from twisted.python.threadpool import ThreadPool
 from twisted.trial import unittest
@@ -476,6 +476,35 @@ class HomeserverTestCase(TestCase):
         self.pump()
         return self.failureResultOf(d, exc)

+    def get_success_or_raise(self, d, by=0.0):
+        """Drive deferred to completion and return result or raise exception
+        on failure.
+ """ + + if inspect.isawaitable(d): + deferred = ensureDeferred(d) + if not isinstance(deferred, Deferred): + return d + + results = [] # type: list + deferred.addBoth(results.append) + + self.pump(by=by) + + if not results: + self.fail( + "Success result expected on {!r}, found no result instead".format( + deferred + ) + ) + + result = results[0] + + if isinstance(result, Failure): + result.raiseException() + + return result + def register_user(self, username, password, admin=False): """ Register a user. Requires the Admin API be registered. From d4605d1f16b5d71c72dbf07b1ffeaa81c0cb87a9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 28 Sep 2020 18:46:59 +0100 Subject: [PATCH 057/134] Don't check whether a 3pid is allowed to register during password reset This endpoint should only deal with emails that have already been approved, and are attached with user's account. There's no need to re-check them here. --- synapse/rest/client/v2_alpha/account.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c3ce0f6259..ed0d0772f8 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -96,13 +96,6 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): send_attempt = body["send_attempt"] next_link = body.get("next_link") # Optional param - if not check_3pid_allowed(self.hs, "email", email): - raise SynapseError( - 403, - "Your email domain is not authorized on this server", - Codes.THREEPID_DENIED, - ) - # Raise if the provided next_link value isn't valid assert_valid_next_link(self.hs, next_link) From fe443acaee36900757d79dbf7d2fb5629df38e3c Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 28 Sep 2020 18:51:41 +0100 Subject: [PATCH 058/134] Changelog --- changelog.d/8414.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8414.bugfix diff --git a/changelog.d/8414.bugfix b/changelog.d/8414.bugfix new file mode 100644 index 0000000000..315876e892 --- /dev/null +++ b/changelog.d/8414.bugfix @@ -0,0 +1 @@ +Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. \ No newline at end of file From 1c262431f9bf768d106bf79a568479fa5a0784a1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Sep 2020 10:29:21 +0100 Subject: [PATCH 059/134] Fix handling of connection timeouts in outgoing http requests (#8400) * Remove `on_timeout_cancel` from `timeout_deferred` The `on_timeout_cancel` param to `timeout_deferred` wasn't always called on a timeout (in particular if the canceller raised an exception), so it was unreliable. It was also only used in one place, and to be honest it's easier to do what it does a different way. * Fix handling of connection timeouts in outgoing http requests Turns out that if we get a timeout during connection, then a different exception is raised, which wasn't always handled correctly. To fix it, catch the exception in SimpleHttpClient and turn it into a RequestTimedOutError (which is already a documented exception). Also add a description to RequestTimedOutError so that we can see which stage it failed at. * Fix incorrect handling of timeouts reading federation responses This was trapping the wrong sort of TimeoutError, so was never being hit. The effect was relatively minor, but we should fix this so that it does the expected thing. 
* Fix inconsistent handling of `timeout` param between methods `get_json`, `put_json` and `delete_json` were applying a different timeout to the response body to `post_json`; bring them in line and test. Co-authored-by: Patrick Cloke Co-authored-by: Erik Johnston --- changelog.d/8400.bugfix | 1 + synapse/handlers/identity.py | 25 ++-- synapse/http/__init__.py | 17 +-- synapse/http/client.py | 54 +++++--- synapse/http/matrixfederationclient.py | 55 ++++++-- synapse/http/proxyagent.py | 16 ++- synapse/util/async_helpers.py | 47 +++---- tests/http/test_fedclient.py | 14 +- tests/http/test_simple_client.py | 180 +++++++++++++++++++++++++ 9 files changed, 311 insertions(+), 98 deletions(-) create mode 100644 changelog.d/8400.bugfix create mode 100644 tests/http/test_simple_client.py diff --git a/changelog.d/8400.bugfix b/changelog.d/8400.bugfix new file mode 100644 index 0000000000..835658ba5e --- /dev/null +++ b/changelog.d/8400.bugfix @@ -0,0 +1 @@ +Fix incorrect handling of timeouts on outgoing HTTP requests. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index ab15570f7a..bc3e9607ca 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -21,8 +21,6 @@ import logging import urllib.parse from typing import Awaitable, Callable, Dict, List, Optional, Tuple -from twisted.internet.error import TimeoutError - from synapse.api.errors import ( CodeMessageException, Codes, @@ -30,6 +28,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.config.emailconfig import ThreepidBehaviour +from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.types import JsonDict, Requester from synapse.util import json_decoder @@ -93,7 +92,7 @@ class IdentityHandler(BaseHandler): try: data = await self.http_client.get_json(url, query_params) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.info( @@ -173,7 +172,7 @@ class IdentityHandler(BaseHandler): if e.code != 404 or not use_v2: logger.error("3PID bind failed with Matrix error: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except CodeMessageException as e: data = json_decoder.decode(e.msg) # XXX WAT? 
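
Every hunk in this file applies the same mechanical substitution, so it is worth stating once. The sketch below is illustrative rather than lifted from the patch (the function and endpoint are invented); it shows the conversion each call site now performs: trap Synapse's own `RequestTimedOutError`, which the HTTP clients raise for every flavour of timeout, instead of Twisted's `TimeoutError`.

    from synapse.api.errors import SynapseError
    from synapse.http import RequestTimedOutError


    async def call_identity_server(http_client, id_server: str, body: dict):
        # hypothetical call site following the pattern used throughout this file
        try:
            return await http_client.post_json_get_json(
                "https://%s/_matrix/identity/v2/lookup" % (id_server,), body
            )
        except RequestTimedOutError:
            # connect, response-header and body-read timeouts all surface here,
            # and are reported to the caller as a single uniform error
            raise SynapseError(500, "Timed out contacting identity server")
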
@@ -273,7 +272,7 @@ class IdentityHandler(BaseHandler): else: logger.error("Failed to unbind threepid on identity server: %s", e) raise SynapseError(500, "Failed to contact identity server") - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") await self.store.remove_user_bound_threepid( @@ -419,7 +418,7 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") async def requestMsisdnToken( @@ -471,7 +470,7 @@ class IdentityHandler(BaseHandler): except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") assert self.hs.config.public_baseurl @@ -553,7 +552,7 @@ class IdentityHandler(BaseHandler): id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken", body, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning("Error contacting msisdn account_threepid_delegate: %s", e) @@ -627,7 +626,7 @@ class IdentityHandler(BaseHandler): # require or validate it. See the following for context: # https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950 return data["mxid"] - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except IOError as e: logger.warning("Error from v1 identity server lookup: %s" % (e,)) @@ -655,7 +654,7 @@ class IdentityHandler(BaseHandler): "%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server), {"access_token": id_access_token}, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") if not isinstance(hash_details, dict): @@ -727,7 +726,7 @@ class IdentityHandler(BaseHandler): }, headers=headers, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except Exception as e: logger.warning("Error when performing a v2 3pid lookup: %s", e) @@ -823,7 +822,7 @@ class IdentityHandler(BaseHandler): invite_config, {"Authorization": create_id_access_token_header(id_access_token)}, ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: if e.code != 404: @@ -841,7 +840,7 @@ class IdentityHandler(BaseHandler): data = await self.blacklisting_http_client.post_json_get_json( url, invite_config ) - except TimeoutError: + except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") except HttpResponseException as e: logger.warning( diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 8eb3638591..59b01b812c 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -16,8 +16,6 @@ import re from twisted.internet import task -from twisted.internet.defer import CancelledError -from twisted.python import failure from twisted.web.client import FileBodyProducer from synapse.api.errors import SynapseError @@ -26,19 +24,8 @@ from synapse.api.errors import SynapseError class RequestTimedOutError(SynapseError): """Exception representing timeout of an outbound 
request""" - def __init__(self): - super().__init__(504, "Timed out") - - -def cancelled_to_request_timed_out_error(value, timeout): - """Turns CancelledErrors into RequestTimedOutErrors. - - For use with async.add_timeout_to_deferred - """ - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise RequestTimedOutError() - return value + def __init__(self, msg): + super().__init__(504, msg) ACCESS_TOKEN_RE = re.compile(r"(\?.*access(_|%5[Ff])token=)[^&]*(.*)$") diff --git a/synapse/http/client.py b/synapse/http/client.py index 4694adc400..8324632cb6 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import urllib from io import BytesIO @@ -38,7 +37,7 @@ from zope.interface import implementer, provider from OpenSSL import SSL from OpenSSL.SSL import VERIFY_NONE -from twisted.internet import defer, protocol, ssl +from twisted.internet import defer, error as twisted_error, protocol, ssl from twisted.internet.interfaces import ( IReactorPluggableNameResolver, IResolutionReceiver, @@ -46,17 +45,18 @@ from twisted.internet.interfaces import ( from twisted.internet.task import Cooperator from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone -from twisted.web.client import Agent, HTTPConnectionPool, readBody +from twisted.web.client import ( + Agent, + HTTPConnectionPool, + ResponseNeverReceived, + readBody, +) from twisted.web.http import PotentialDataLoss from twisted.web.http_headers import Headers from twisted.web.iweb import IResponse from synapse.api.errors import Codes, HttpResponseException, SynapseError -from synapse.http import ( - QuieterFileBodyProducer, - cancelled_to_request_timed_out_error, - redact_uri, -) +from synapse.http import QuieterFileBodyProducer, RequestTimedOutError, redact_uri from synapse.http.proxyagent import ProxyAgent from synapse.logging.context import make_deferred_yieldable from synapse.logging.opentracing import set_tag, start_active_span, tags @@ -332,8 +332,6 @@ class SimpleHttpClient: RequestTimedOutError if the request times out before the headers are read """ - # A small wrapper around self.agent.request() so we can easily attach - # counters to it outgoing_requests_counter.labels(method).inc() # log request but strip `access_token` (AS requests for example include this) @@ -362,15 +360,17 @@ class SimpleHttpClient: data=body_producer, headers=headers, **self._extra_treq_args - ) + ) # type: defer.Deferred + # we use our own timeout mechanism rather than treq's as a workaround # for https://twistedmatrix.com/trac/ticket/9534. request_deferred = timeout_deferred( - request_deferred, - 60, - self.hs.get_reactor(), - cancelled_to_request_timed_out_error, + request_deferred, 60, self.hs.get_reactor(), ) + + # turn timeouts into RequestTimedOutErrors + request_deferred.addErrback(_timeout_to_request_timed_out_error) + response = await make_deferred_yieldable(request_deferred) incoming_responses_counter.labels(method, response.code).inc() @@ -410,7 +410,7 @@ class SimpleHttpClient: parsed json Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. 
@@ -461,7 +461,7 @@ class SimpleHttpClient: parsed json Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -506,7 +506,7 @@ class SimpleHttpClient: Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -538,7 +538,7 @@ class SimpleHttpClient: Returns: Succeeds when we get a 2xx HTTP response, with the HTTP body as JSON. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -586,7 +586,7 @@ class SimpleHttpClient: Succeeds when we get a 2xx HTTP response, with the HTTP body as bytes. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -631,7 +631,7 @@ class SimpleHttpClient: headers, absolute URI of the response and HTTP response code. Raises: - RequestTimedOutException: if there is a timeout before the response headers + RequestTimedOutError: if there is a timeout before the response headers are received. Note there is currently no timeout on reading the response body. @@ -684,6 +684,18 @@ class SimpleHttpClient: ) +def _timeout_to_request_timed_out_error(f: Failure): + if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): + # The TCP connection has its own timeout (set by the 'connectTimeout' param + # on the Agent), which raises twisted_error.TimeoutError exception. + raise RequestTimedOutError("Timeout connecting to remote server") + elif f.check(defer.TimeoutError, ResponseNeverReceived): + # this one means that we hit our overall timeout on the request + raise RequestTimedOutError("Timeout waiting for response from remote server") + + return f + + # XXX: FIXME: This is horribly copy-pasted from matrixfederationclient. # The two should be factored out. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b02c74ab2d..c23a4d7c0c 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -171,7 +171,7 @@ async def _handle_json_response( d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) body = await make_deferred_yieldable(d) - except TimeoutError as e: + except defer.TimeoutError as e: logger.warning( "{%s} [%s] Timed out reading response - %s %s", request.txn_id, @@ -655,10 +655,14 @@ class MatrixFederationHttpClient: long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. 
+ Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. backoff_on_404 (bool): True if we should count a 404 response as @@ -704,8 +708,13 @@ class MatrixFederationHttpClient: timeout=timeout, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body @@ -734,10 +743,14 @@ class MatrixFederationHttpClient: long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -801,10 +814,14 @@ class MatrixFederationHttpClient: args (dict|None): A dictionary used to create query strings, defaults to None. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -840,8 +857,13 @@ class MatrixFederationHttpClient: timeout=timeout, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body @@ -865,10 +887,14 @@ class MatrixFederationHttpClient: long_retries (bool): whether to use the long retry algorithm. See docs on _send_request for details. - timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server), *for each attempt*. + timeout (int|None): number of milliseconds to wait for the response. self._default_timeout (60s) by default. + Note that we may make several attempts to send the request; this + timeout applies to the time spent waiting for response headers for + *each* attempt (including connection time) as well as the time spent + reading the response body after a 200 response. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. 
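
The `_sec_timeout` conversion repeated above is easy to get wrong because the public methods take milliseconds (or None) while `_handle_json_response` wants seconds. Restated as a standalone helper, purely for clarity (no such helper exists in the patch):

    from typing import Optional


    def _body_read_timeout_sec(timeout_ms: Optional[int], default_sec: float) -> float:
        # Mirrors the inline logic: a caller-supplied millisecond timeout is
        # converted to seconds; otherwise the client-wide default (60s) applies.
        if timeout_ms is not None:
            return timeout_ms / 1000
        return default_sec
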
@@ -900,8 +926,13 @@ class MatrixFederationHttpClient: ignore_backoff=ignore_backoff, ) + if timeout is not None: + _sec_timeout = timeout / 1000 + else: + _sec_timeout = self.default_timeout + body = await _handle_json_response( - self.reactor, self.default_timeout, request, response, start_ms + self.reactor, _sec_timeout, request, response, start_ms ) return body diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 332da02a8d..e32d3f43e0 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -44,8 +44,11 @@ class ProxyAgent(_AgentBase): `BrowserLikePolicyForHTTPS`, so unless you have special requirements you can leave this as-is. - connectTimeout (float): The amount of time that this Agent will wait - for the peer to accept a connection. + connectTimeout (Optional[float]): The amount of time that this Agent will wait + for the peer to accept a connection, in seconds. If 'None', + HostnameEndpoint's default (30s) will be used. + + This is used for connections to both proxies and destination servers. bindAddress (bytes): The local address for client sockets to bind to. @@ -108,6 +111,15 @@ class ProxyAgent(_AgentBase): Returns: Deferred[IResponse]: completes when the header of the response has been received (regardless of the response status code). + + Can fail with: + SchemeNotSupported: if the uri is not http or https + + twisted.internet.error.TimeoutError if the server we are connecting + to (proxy or destination) does not accept a connection before + connectTimeout. + + ... other things too. """ uri = uri.strip() if not _VALID_URI.match(uri): diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 67ce9a5f39..382f0cf3f0 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -449,18 +449,8 @@ class ReadWriteLock: R = TypeVar("R") -def _cancelled_to_timed_out_error(value: R, timeout: float) -> R: - if isinstance(value, failure.Failure): - value.trap(CancelledError) - raise defer.TimeoutError(timeout, "Deferred") - return value - - def timeout_deferred( - deferred: defer.Deferred, - timeout: float, - reactor: IReactorTime, - on_timeout_cancel: Optional[Callable[[Any, float], Any]] = None, + deferred: defer.Deferred, timeout: float, reactor: IReactorTime, ) -> defer.Deferred: """The in built twisted `Deferred.addTimeout` fails to time out deferreds that have a canceller that throws exceptions. This method creates a new @@ -469,27 +459,21 @@ def timeout_deferred( (See https://twistedmatrix.com/trac/ticket/9534) - NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred + NOTE: Unlike `Deferred.addTimeout`, this function returns a new deferred. + + NOTE: the TimeoutError raised by the resultant deferred is + twisted.internet.defer.TimeoutError, which is *different* to the built-in + TimeoutError, as well as various other TimeoutErrors you might have imported. Args: deferred: The Deferred to potentially timeout. timeout: Timeout in seconds reactor: The twisted reactor to use - on_timeout_cancel: A callable which is called immediately - after the deferred times out, and not if this deferred is - otherwise cancelled before the timeout. - It takes an arbitrary value, which is the value of the deferred at - that exact point in time (probably a CancelledError Failure), and - the timeout. - - The default callable (if none is provided) will translate a - CancelledError Failure into a defer.TimeoutError. Returns: - A new Deferred. 
+        A new Deferred, which will errback with defer.TimeoutError on timeout.
     """
-
     new_d = defer.Deferred()

     timed_out = [False]

@@ -502,18 +486,23 @@ def timeout_deferred(
         except:  # noqa: E722, if we throw any exception it'll break time outs
             logger.exception("Canceller failed during timeout")

+        # the cancel() call should have set off a chain of errbacks which
+        # will have errbacked new_d, but in case it hasn't, errback it now.
+
         if not new_d.called:
-            new_d.errback(defer.TimeoutError(timeout, "Deferred"))
+            new_d.errback(defer.TimeoutError("Timed out after %gs" % (timeout,)))

     delayed_call = reactor.callLater(timeout, time_it_out)

-    def convert_cancelled(value):
-        if timed_out[0]:
-            to_call = on_timeout_cancel or _cancelled_to_timed_out_error
-            return to_call(value, timeout)
+    def convert_cancelled(value: failure.Failure):
+        # if the original deferred was cancelled, and our timeout has fired, then
+        # the reason it was cancelled was due to our timeout. Turn the CancelledError
+        # into a TimeoutError.
+        if timed_out[0] and value.check(CancelledError):
+            raise defer.TimeoutError("Timed out after %gs" % (timeout,))
         return value

-    deferred.addBoth(convert_cancelled)
+    deferred.addErrback(convert_cancelled)

     def cancel_timeout(result):
         # stop the pending call to cancel the deferred if it's been fired

diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index 5604af3795..212484a7fe 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -318,14 +318,14 @@ class FederationClientTests(HomeserverTestCase):
         r = self.successResultOf(d)
         self.assertEqual(r.code, 200)

-    def test_client_headers_no_body(self):
+    @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"])
+    def test_timeout_reading_body(self, method_name: str):
         """
         If the HTTP request is connected, but gets no response before being
-        timed out, it'll give a ResponseNeverReceived.
+        timed out, it'll give a RequestSendFailed with can_retry.
         """
-        d = defer.ensureDeferred(
-            self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
-        )
+        method = getattr(self.cl, method_name)
+        d = defer.ensureDeferred(method("testserv:8008", "foo/bar", timeout=10000))

         self.pump()

@@ -349,7 +349,9 @@ class FederationClientTests(HomeserverTestCase):
         self.reactor.advance(10.5)
         f = self.failureResultOf(d)

-        self.assertIsInstance(f.value, TimeoutError)
+        self.assertIsInstance(f.value, RequestSendFailed)
+        self.assertTrue(f.value.can_retry)
+        self.assertIsInstance(f.value.inner_exception, defer.TimeoutError)

     def test_client_requires_trailing_slashes(self):
         """

diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py
new file mode 100644
index 0000000000..a1cf0862d4
--- /dev/null
+++ b/tests/http/test_simple_client.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from mock import Mock + +from netaddr import IPSet + +from twisted.internet import defer +from twisted.internet.error import DNSLookupError + +from synapse.http import RequestTimedOutError +from synapse.http.client import SimpleHttpClient +from synapse.server import HomeServer + +from tests.unittest import HomeserverTestCase + + +class SimpleHttpClientTests(HomeserverTestCase): + def prepare(self, reactor, clock, hs: "HomeServer"): + # Add a DNS entry for a test server + self.reactor.lookups["testserv"] = "1.2.3.4" + + self.cl = hs.get_simple_http_client() + + def test_dns_error(self): + """ + If the DNS lookup returns an error, it will bubble up. + """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv2:8008/foo/bar")) + self.pump() + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, DNSLookupError) + + def test_client_connection_refused(self): + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + e = Exception("go away") + factory.clientConnectionFailed(None, e) + self.pump(0.5) + + f = self.failureResultOf(d) + + self.assertIs(f.value, e) + + def test_client_never_connect(self): + """ + If the HTTP request is not connected and is timed out, it'll give a + ConnectingCancelledError or TimeoutError. + """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], "1.2.3.4") + self.assertEqual(clients[0][1], 8008) + + # Deferred is still without a result + self.assertNoResult(d) + + # Push by enough to time it out + self.reactor.advance(120) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, RequestTimedOutError) + + def test_client_connect_no_response(self): + """ + If the HTTP request is connected, but gets no response before being + timed out, it'll give a ResponseNeverReceived. 
+ """ + d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + self.assertEqual(clients[0][0], "1.2.3.4") + self.assertEqual(clients[0][1], 8008) + + conn = Mock() + client = clients[0][2].buildProtocol(None) + client.makeConnection(conn) + + # Deferred is still without a result + self.assertNoResult(d) + + # Push by enough to time it out + self.reactor.advance(120) + f = self.failureResultOf(d) + + self.assertIsInstance(f.value, RequestTimedOutError) + + def test_client_ip_range_blacklist(self): + """Ensure that Synapse does not try to connect to blacklisted IPs""" + + # Add some DNS entries we'll blacklist + self.reactor.lookups["internal"] = "127.0.0.1" + self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" + ip_blacklist = IPSet(["127.0.0.0/8", "fe80::/64"]) + + cl = SimpleHttpClient(self.hs, ip_blacklist=ip_blacklist) + + # Try making a GET request to a blacklisted IPv4 address + # ------------------------------------------------------ + # Make the request + d = defer.ensureDeferred(cl.get_json("http://internal:8008/foo/bar")) + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + self.failureResultOf(d, DNSLookupError) + + # Try making a POST request to a blacklisted IPv6 address + # ------------------------------------------------------- + # Make the request + d = defer.ensureDeferred( + cl.post_json_get_json("http://internalv6:8008/foo/bar", {}) + ) + + # Move the reactor forwards + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + # Check that it was due to a blacklisted DNS lookup + self.failureResultOf(d, DNSLookupError) + + # Try making a GET request to a non-blacklisted IPv4 address + # ---------------------------------------------------------- + # Make the request + d = defer.ensureDeferred(cl.get_json("http://testserv:8008/foo/bar")) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was able to resolve the address + clients = self.reactor.tcpClients + self.assertNotEqual(len(clients), 0) + + # Connection will still fail as this IP address does not resolve to anything + self.failureResultOf(d, RequestTimedOutError) From 866c84da8ddda04f1da94b8e03719147c6d4875e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 29 Sep 2020 11:06:11 +0100 Subject: [PATCH 060/134] Add metrics to track success/otherwise of replication requests (#8406) One hope is that this might provide some insights into #3365. --- changelog.d/8406.feature | 1 + synapse/replication/http/_base.py | 40 +++++++++++++++++++++---------- 2 files changed, 29 insertions(+), 12 deletions(-) create mode 100644 changelog.d/8406.feature diff --git a/changelog.d/8406.feature b/changelog.d/8406.feature new file mode 100644 index 0000000000..1c6472ae7e --- /dev/null +++ b/changelog.d/8406.feature @@ -0,0 +1 @@ +Add prometheus metrics for replication requests. 
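[Editor's note: the diff below wires a labelled Gauge/Counter pair around each
outgoing replication request. As a hedged, standalone sketch of the same
prometheus_client pattern, with invented metric and function names (only the
library calls themselves are the real API):

from prometheus_client import Counter, Gauge

pending = Gauge(
    "example_pending_requests",
    "Number of in-flight requests, by method name",
    ["name"],
)
results = Counter(
    "example_requests",
    "Number of completed requests, by method name and result",
    ["name", "code"],
)

def call_endpoint(name: str) -> None:
    # track_inprogress() increments the labelled gauge on entry and decrements
    # it on exit, even if the body raises
    with pending.labels(name).track_inprogress():
        try:
            pass  # the request itself would happen here
        except Exception:
            results.labels(name, "ERR").inc()  # bucket all failures under "ERR"
            raise
        results.labels(name, 200).inc()  # successes counted by status code

The patch applies the gauge as a decorator, @outgoing_gauge.track_inprogress(),
which prometheus_client supports interchangeably with the context-manager form
used in this sketch.]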
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index b448da6710..64edadb624 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -20,18 +20,28 @@ import urllib from inspect import signature from typing import Dict, List, Tuple -from synapse.api.errors import ( - CodeMessageException, - HttpResponseException, - RequestSendFailed, - SynapseError, -) +from prometheus_client import Counter, Gauge + +from synapse.api.errors import HttpResponseException, SynapseError +from synapse.http import RequestTimedOutError from synapse.logging.opentracing import inject_active_span_byte_dict, trace from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string logger = logging.getLogger(__name__) +_pending_outgoing_requests = Gauge( + "synapse_pending_outgoing_replication_requests", + "Number of active outgoing replication requests, by replication method name", + ["name"], +) + +_outgoing_request_counter = Counter( + "synapse_outgoing_replication_requests", + "Number of outgoing replication requests, by replication method name and result", + ["name", "code"], +) + class ReplicationEndpoint(metaclass=abc.ABCMeta): """Helper base class for defining new replication HTTP endpoints. @@ -138,7 +148,10 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): instance_map = hs.config.worker.instance_map + outgoing_gauge = _pending_outgoing_requests.labels(cls.NAME) + @trace(opname="outgoing_replication_request") + @outgoing_gauge.track_inprogress() async def send_request(instance_name="master", **kwargs): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") @@ -193,23 +206,26 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): try: result = await request_func(uri, data, headers=headers) break - except CodeMessageException as e: - if e.code != 504 or not cls.RETRY_ON_TIMEOUT: + except RequestTimedOutError: + if not cls.RETRY_ON_TIMEOUT: raise - logger.warning("%s request timed out", cls.NAME) + logger.warning("%s request timed out; retrying", cls.NAME) # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. await clock.sleep(1) except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError - # on the master process that we should send to the client. (And + # on the main process that we should send to the client. (And # importantly, not stack traces everywhere) + _outgoing_request_counter.labels(cls.NAME, e.code).inc() raise e.to_synapse_error() - except RequestSendFailed as e: - raise SynapseError(502, "Failed to talk to master") from e + except Exception as e: + _outgoing_request_counter.labels(cls.NAME, "ERR").inc() + raise SynapseError(502, "Failed to talk to main process") from e + _outgoing_request_counter.labels(cls.NAME, 200).inc() return result return send_request From 1c6b8752b891c1a25524d8dfaa8efb7176c0dbec Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 29 Sep 2020 12:36:44 +0100 Subject: [PATCH 061/134] Only assert valid next_link params when provided (#8417) Broken in https://github.com/matrix-org/synapse/pull/8275 and has yet to be put in a release. Fixes https://github.com/matrix-org/synapse/issues/8418. `next_link` is an optional parameter. However, we were checking whether the `next_link` param was valid, even if it wasn't provided. 
In that case, `next_link` was `None`, which would clearly not be a valid URL. This would prevent password reset and other operations if `next_link` was not provided, and the `next_link_domain_whitelist` config option was set. --- changelog.d/8417.feature | 1 + synapse/rest/client/v2_alpha/account.py | 15 +++++++++------ tests/rest/client/v2_alpha/test_account.py | 6 ++++++ 3 files changed, 16 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8417.feature diff --git a/changelog.d/8417.feature b/changelog.d/8417.feature new file mode 100644 index 0000000000..17549c3df3 --- /dev/null +++ b/changelog.d/8417.feature @@ -0,0 +1 @@ +Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index c3ce0f6259..9245214f36 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -103,8 +103,9 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) # The email will be sent to the stored address. # This avoids a potential account hijack by requesting a password reset to @@ -379,8 +380,9 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) existing_user_id = await self.store.get_user_id_by_threepid("email", email) @@ -453,8 +455,9 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - # Raise if the provided next_link value isn't valid - assert_valid_next_link(self.hs, next_link) + if next_link: + # Raise if the provided next_link value isn't valid + assert_valid_next_link(self.hs, next_link) existing_user_id = await self.store.get_user_id_by_threepid("msisdn", msisdn) diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 93f899d861..ae2cd67f35 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -732,6 +732,12 @@ class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): @override_config({"next_link_domain_whitelist": ["example.com", "example.org"]}) def test_next_link_domain_whitelist(self): """Tests next_link parameters must fit the whitelist if provided""" + + # Ensure not providing a next_link parameter still works + self._request_token( + "something@example.com", "some_secret", next_link=None, expect_code=200, + ) + self._request_token( "something@example.com", "some_secret", From 8676d8ab2e5667d7c12774effc64b3ab99344a8d Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 29 Sep 2020 13:11:02 +0100 Subject: [PATCH 062/134] Filter out appservices from mau count (#8404) This is an attempt to fix #8403. 
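[Editor's note: the fix below excludes appservice-registered users by joining
monthly_active_users against users and filtering on appservice_id. A hedged,
self-contained illustration of that LEFT JOIN pattern, using sqlite3 and a toy
schema rather than Synapse's real tables:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE users (name TEXT, appservice_id TEXT);
    CREATE TABLE monthly_active_users (user_id TEXT, timestamp BIGINT);
    INSERT INTO users VALUES ('@alice:server', NULL), ('@bot:server', 'wibble');
    INSERT INTO monthly_active_users VALUES ('@alice:server', 0), ('@bot:server', 0);
    """
)
(count,) = conn.execute(
    """
    SELECT COALESCE(count(*), 0)
    FROM monthly_active_users
    LEFT JOIN users ON monthly_active_users.user_id = users.name
    WHERE (users.appservice_id IS NULL OR users.appservice_id = '')
    """
).fetchone()
assert count == 1  # only @alice:server counts; the appservice-backed bot does not

Using a LEFT JOIN rather than an inner join means an MAU row with no matching
users row still counts, since its appservice_id comes back NULL.]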
--- changelog.d/8404.misc | 1 + .../databases/main/monthly_active_users.py | 9 ++++++++- tests/storage/test_monthly_active_users.py | 17 ++++++++++++++++- 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8404.misc diff --git a/changelog.d/8404.misc b/changelog.d/8404.misc new file mode 100644 index 0000000000..7aadded6c1 --- /dev/null +++ b/changelog.d/8404.misc @@ -0,0 +1 @@ +Do not include appservice users when calculating the total MAU for a server. diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e0cedd1aac..e93aad33cd 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -41,7 +41,14 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): """ def _count_users(txn): - sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users" + # Exclude app service users + sql = """ + SELECT COALESCE(count(*), 0) + FROM monthly_active_users + LEFT JOIN users + ON monthly_active_users.user_id=users.name + WHERE (users.appservice_id IS NULL OR users.appservice_id = ''); + """ txn.execute(sql) (count,) = txn.fetchone() return count diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 643072bbaf..8d97b6d4cd 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -137,6 +137,21 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): count = self.get_success(self.store.get_monthly_active_count()) self.assertEqual(count, 1) + def test_appservice_user_not_counted_in_mau(self): + self.get_success( + self.store.register_user( + user_id="@appservice_user:server", appservice_id="wibble" + ) + ) + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 0) + + d = self.store.upsert_monthly_active_user("@appservice_user:server") + self.get_success(d) + + count = self.get_success(self.store.get_monthly_active_count()) + self.assertEqual(count, 0) + def test_user_last_seen_monthly_active(self): user_id1 = "@user1:server" user_id2 = "@user2:server" @@ -383,7 +398,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.get_success(self.store.upsert_monthly_active_user(appservice2_user1)) count = self.get_success(self.store.get_monthly_active_count()) - self.assertEqual(count, 4) + self.assertEqual(count, 1) d = self.store.get_monthly_active_count_by_service() result = self.get_success(d) From 12f0d18611f406df5e741c124cac8246fcfd9c14 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 29 Sep 2020 13:47:47 +0100 Subject: [PATCH 063/134] Add support for running Complement against the local checkout (#8317) This PR adds a script that: * Builds the local Synapse checkout using our existing `docker/Dockerfile` image. * Downloads [Complement](https://github.com/matrix-org/complement/)'s source code. * Builds the [Synapse.Dockerfile](https://github.com/matrix-org/complement/blob/master/dockerfiles/Synapse.Dockerfile) using the above dockerfile as a base. * Builds and runs Complement against it. This set up differs slightly from [that of the dendrite repo](https://github.com/matrix-org/dendrite/blob/master/build/scripts/complement.sh) (`complement.sh`, `Complement.Dockerfile`), which instead stores a separate, but slightly modified, dockerfile in Dendrite's repo rather than running the one stored in Complement's repo. 
The Synapse equivalent of that dockerfile (`Synapse.Dockerfile`) in Complement's repo is just based on top of `matrixdotorg/synapse:latest`, which we opt to build here locally. Thus copying the files over from Complement's repo wouldn't change any functionality, and would result in two instances of the same files. So we decided to just use the dockerfile in Complement's repo instead.
---
 changelog.d/8317.feature  |  1 +
 scripts-dev/complement.sh | 22 ++++++++++++++++++++++
 2 files changed, 23 insertions(+)
 create mode 100644 changelog.d/8317.feature
 create mode 100755 scripts-dev/complement.sh

diff --git a/changelog.d/8317.feature b/changelog.d/8317.feature
new file mode 100644
index 0000000000..f9edda099c
--- /dev/null
+++ b/changelog.d/8317.feature
@@ -0,0 +1 @@
+Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/).
\ No newline at end of file
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
new file mode 100755
index 0000000000..3cde53f5c0
--- /dev/null
+++ b/scripts-dev/complement.sh
@@ -0,0 +1,22 @@
+#! /bin/bash -eu
+# This script is designed for developers who want to test their code
+# against Complement.
+#
+# It makes a Synapse image which represents the current checkout,
+# then downloads Complement and runs it with that image.
+
+cd "$(dirname $0)/.."
+
+# Build the base Synapse image from the local checkout
+docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile .
+
+# Download Complement
+wget -N https://github.com/matrix-org/complement/archive/master.tar.gz
+tar -xzf master.tar.gz
+cd complement-master
+
+# Build the Synapse image from Complement, based on the above image we just built
+docker build -t complement-synapse -f dockerfiles/Synapse.Dockerfile ./dockerfiles
+
+# Run the tests on the resulting image!
+COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -count=1 ./tests

From 2649d545a551dd126d73d34a6e3172916ea483e0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 29 Sep 2020 15:57:36 +0100
Subject: [PATCH 064/134] Mypy fixes for `synapse.handlers.federation` (#8422)

For some reason, an apparently unrelated PR upset mypy about this module. Here
are a number of little fixes.

---
 changelog.d/8422.misc                    |  1 +
 synapse/federation/federation_client.py  |  4 +++-
 synapse/handlers/federation.py           | 13 +++++++++----
 synapse/storage/databases/state/store.py |  4 ++--
 synapse/storage/persist_events.py        |  2 +-
 synapse/storage/state.py                 |  6 +++---
 6 files changed, 19 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/8422.misc

diff --git a/changelog.d/8422.misc b/changelog.d/8422.misc
new file mode 100644
index 0000000000..03fba120c6
--- /dev/null
+++ b/changelog.d/8422.misc
@@ -0,0 +1 @@
+Typing fixes for `synapse.handlers.federation`.
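[Editor's note: a hedged sketch of two mypy idioms the hunks below lean on;
the function names here are invented for illustration:

from typing import Dict, Iterable, List, Optional

def values_as_list(groups: Dict[int, Iterable[str]]) -> List[Iterable[str]]:
    # Reassigning `groups = list(groups.values())` would change the variable's
    # type mid-function, which mypy rejects; binding a fresh name (as the patch
    # does with `state_sets_d`) keeps each inference precise.
    groups_list = list(groups.values())  # type: List[Iterable[str]]
    return groups_list

def require(persistence: Optional[str]) -> str:
    # an assert narrows Optional[T] to T for the checker, mirroring the
    # `assert self.storage.persistence` hunk below
    assert persistence is not None
    return persistence

Both are standard mypy techniques rather than anything Synapse-specific.]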
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 688d43fffb..302b2f69bc 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -24,10 +24,12 @@ from typing import ( Dict, Iterable, List, + Mapping, Optional, Sequence, Tuple, TypeVar, + Union, ) from prometheus_client import Counter @@ -501,7 +503,7 @@ class FederationClient(FederationBase): user_id: str, membership: str, content: dict, - params: Dict[str, str], + params: Optional[Mapping[str, Union[str, Iterable[str]]]], ) -> Tuple[str, EventBase, RoomVersion]: """ Creates an m.room.member event, with context, without participating in the room. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5bcfb231b2..0073e7c996 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -155,8 +155,9 @@ class FederationHandler(BaseHandler): self._device_list_updater = hs.get_device_handler().device_list_updater self._maybe_store_room_on_invite = self.store.maybe_store_room_on_invite - # When joining a room we need to queue any events for that room up - self.room_queues = {} + # When joining a room we need to queue any events for that room up. + # For each room, a list of (pdu, origin) tuples. + self.room_queues = {} # type: Dict[str, List[Tuple[EventBase, str]]] self._room_pdu_linearizer = Linearizer("fed_room_pdu") self.third_party_event_rules = hs.get_third_party_event_rules() @@ -814,6 +815,9 @@ class FederationHandler(BaseHandler): dest, room_id, limit=limit, extremities=extremities ) + if not events: + return [] + # ideally we'd sanity check the events here for excess prev_events etc, # but it's hard to reject events at this point without completely # breaking backfill in the same way that it is currently broken by @@ -2164,10 +2168,10 @@ class FederationHandler(BaseHandler): # given state at the event. This should correctly handle cases # like bans, especially with state res v2. 
- state_sets = await self.state_store.get_state_groups( + state_sets_d = await self.state_store.get_state_groups( event.room_id, extrem_ids ) - state_sets = list(state_sets.values()) + state_sets = list(state_sets_d.values()) # type: List[Iterable[EventBase]] state_sets.append(state) current_states = await self.state_handler.resolve_events( room_version, state_sets, event @@ -2958,6 +2962,7 @@ class FederationHandler(BaseHandler): ) return result["max_stream_id"] else: + assert self.storage.persistence max_stream_token = await self.storage.persistence.persist_events( event_and_contexts, backfilled=backfilled ) diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 989f0cbc9d..0e31cc811a 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -24,7 +24,7 @@ from synapse.storage.databases.state.bg_updates import StateBackgroundUpdateStor from synapse.storage.state import StateFilter from synapse.storage.types import Cursor from synapse.storage.util.sequence import build_sequence_generator -from synapse.types import StateMap +from synapse.types import MutableStateMap, StateMap from synapse.util.caches.descriptors import cached from synapse.util.caches.dictionary_cache import DictionaryCache @@ -208,7 +208,7 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore): async def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() - ) -> Dict[int, StateMap[str]]: + ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 603cd7d825..ded6cf9655 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -197,7 +197,7 @@ class EventsPersistenceStorage: async def persist_events( self, - events_and_contexts: List[Tuple[EventBase, EventContext]], + events_and_contexts: Iterable[Tuple[EventBase, EventContext]], backfilled: bool = False, ) -> RoomStreamToken: """ diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 8f68d968f0..08a69f2f96 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -20,7 +20,7 @@ import attr from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.types import StateMap +from synapse.types import MutableStateMap, StateMap logger = logging.getLogger(__name__) @@ -349,7 +349,7 @@ class StateGroupStorage: async def get_state_groups_ids( self, _room_id: str, event_ids: Iterable[str] - ) -> Dict[int, StateMap[str]]: + ) -> Dict[int, MutableStateMap[str]]: """Get the event IDs of all the state for the state groups for the given events Args: @@ -532,7 +532,7 @@ class StateGroupStorage: def _get_state_for_groups( self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() - ) -> Awaitable[Dict[int, StateMap[str]]]: + ) -> Awaitable[Dict[int, MutableStateMap[str]]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key From b1433bf231370636b817ffa01e6cda5a567cfafe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Sep 2020 16:42:19 +0100 Subject: [PATCH 065/134] Don't table scan events on worker startup (#8419) * Fix table scan of events on worker startup. 
This happened because we assumed "new" writers had an initial stream position of 0, so the replication code tried to fetch all events written by the instance between 0 and the current position. Instead, set the initial position of new writers to the current persisted up to position, on the assumption that new writers won't have written anything before that point. * Consider old writers coming back as "new". Otherwise we'd try and fetch entries between the old stale token and the current position, even though it won't have written any rows. Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/8419.feature | 1 + synapse/storage/util/id_generators.py | 26 +++++++++++++++++++++++++- tests/storage/test_id_generators.py | 18 ++++++++++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8419.feature diff --git a/changelog.d/8419.feature b/changelog.d/8419.feature new file mode 100644 index 0000000000..b363e929ea --- /dev/null +++ b/changelog.d/8419.feature @@ -0,0 +1 @@ +Add experimental support for sharding event persister. diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 4fd7573e26..02fbb656e8 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -273,6 +273,19 @@ class MultiWriterIdGenerator: # Load the current positions of all writers for the stream. if self._writers: + # We delete any stale entries in the positions table. This is + # important if we add back a writer after a long time; we want to + # consider that a "new" writer, rather than using the old stale + # entry here. + sql = """ + DELETE FROM stream_positions + WHERE + stream_name = ? + AND instance_name != ALL(?) + """ + sql = self._db.engine.convert_param_style(sql) + cur.execute(sql, (self._stream_name, self._writers)) + sql = """ SELECT instance_name, stream_id FROM stream_positions WHERE stream_name = ? @@ -453,11 +466,22 @@ class MultiWriterIdGenerator: """Returns the position of the given writer. """ + # If we don't have an entry for the given instance name, we assume it's a + # new writer. + # + # For new writers we assume their initial position to be the current + # persisted up to position. This stops Synapse from doing a full table + # scan when a new writer announces itself over replication. with self._lock: - return self._return_factor * self._current_positions.get(instance_name, 0) + return self._return_factor * self._current_positions.get( + instance_name, self._persisted_upto_position + ) def get_positions(self) -> Dict[str, int]: """Get a copy of the current positon map. + + Note that this won't necessarily include all configured writers if some + writers haven't written anything yet. """ with self._lock: diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index 4558bee7be..392b08832b 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -390,17 +390,28 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): # Initial config has two writers id_gen = self._create_id_generator("first", writers=["first", "second"]) self.assertEqual(id_gen.get_persisted_upto_position(), 3) + self.assertEqual(id_gen.get_current_token_for_writer("first"), 3) + self.assertEqual(id_gen.get_current_token_for_writer("second"), 5) # New config removes one of the configs. 
Note that if the writer is # removed from config we assume that it has been shut down and has # finished persisting, hence why the persisted upto position is 5. id_gen_2 = self._create_id_generator("second", writers=["second"]) self.assertEqual(id_gen_2.get_persisted_upto_position(), 5) + self.assertEqual(id_gen_2.get_current_token_for_writer("second"), 5) # This config points to a single, previously unused writer. id_gen_3 = self._create_id_generator("third", writers=["third"]) self.assertEqual(id_gen_3.get_persisted_upto_position(), 5) + # For new writers we assume their initial position to be the current + # persisted up to position. This stops Synapse from doing a full table + # scan when a new writer comes along. + self.assertEqual(id_gen_3.get_current_token_for_writer("third"), 5) + + id_gen_4 = self._create_id_generator("fourth", writers=["third"]) + self.assertEqual(id_gen_4.get_current_token_for_writer("third"), 5) + # Check that we get a sane next stream ID with this new config. async def _get_next_async(): @@ -410,6 +421,13 @@ class MultiWriterIdGeneratorTestCase(HomeserverTestCase): self.get_success(_get_next_async()) self.assertEqual(id_gen_3.get_persisted_upto_position(), 6) + # If we add back the old "first" then we shouldn't see the persisted up + # to position revert back to 3. + id_gen_5 = self._create_id_generator("five", writers=["first", "third"]) + self.assertEqual(id_gen_5.get_persisted_upto_position(), 6) + self.assertEqual(id_gen_5.get_current_token_for_writer("first"), 6) + self.assertEqual(id_gen_5.get_current_token_for_writer("third"), 6) + def test_sequence_consistency(self): """Test that we error out if the table and sequence diverges. """ From c2bdf040aa93f3b542d1b0e2f6ce57853630ec6f Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Tue, 29 Sep 2020 17:15:27 +0100 Subject: [PATCH 066/134] Discard an empty upload_name before persisting an uploaded file (#7905) --- changelog.d/7905.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 7 ++++--- synapse/rest/media/v1/upload_resource.py | 4 ++++ 3 files changed, 9 insertions(+), 3 deletions(-) create mode 100644 changelog.d/7905.bugfix diff --git a/changelog.d/7905.bugfix b/changelog.d/7905.bugfix new file mode 100644 index 0000000000..e60e624412 --- /dev/null +++ b/changelog.d/7905.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug when storing a media file with an empty `upload_name`. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 69f353d46f..ae6822d6e7 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -139,7 +139,7 @@ class MediaRepository: async def create_content( self, media_type: str, - upload_name: str, + upload_name: Optional[str], content: IO, content_length: int, auth_user: str, @@ -147,8 +147,8 @@ class MediaRepository: """Store uploaded content for a local user and return the mxc URL Args: - media_type: The content type of the file - upload_name: The name of the file + media_type: The content type of the file. + upload_name: The name of the file, if provided. 
content: A file like object that is the content to store content_length: The length of the content auth_user: The user_id of the uploader @@ -156,6 +156,7 @@ class MediaRepository: Returns: The mxc url of the stored content """ + media_id = random_string(24) file_info = FileInfo(server_name=None, file_id=media_id) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 3ebf7a68e6..d76f7389e1 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -63,6 +63,10 @@ class UploadResource(DirectServeJsonResource): msg="Invalid UTF-8 filename parameter: %r" % (upload_name), code=400 ) + # If the name is falsey (e.g. an empty byte string) ensure it is None. + else: + upload_name = None + headers = request.requestHeaders if headers.hasHeader(b"Content-Type"): From 937393abd81e16c7d4bd4d02fe3c0fafafb9611b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 28 Sep 2020 15:20:02 +0100 Subject: [PATCH 067/134] Move `resolve_events_with_store` into StateResolutionHandler --- synapse/handlers/federation.py | 13 +++-- synapse/state/__init__.py | 92 +++++++++++++++++----------------- 2 files changed, 55 insertions(+), 50 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0073e7c996..1a8144405a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -21,7 +21,7 @@ import itertools import logging from collections.abc import Container from http import HTTPStatus -from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union import attr from signedjson.key import decode_verify_key_bytes @@ -69,7 +69,7 @@ from synapse.replication.http.federation import ( ReplicationFederationSendEventsRestServlet, ReplicationStoreRoomOnInviteRestServlet, ) -from synapse.state import StateResolutionStore, resolve_events_with_store +from synapse.state import StateResolutionStore from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( JsonDict, @@ -85,6 +85,9 @@ from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -116,7 +119,7 @@ class FederationHandler(BaseHandler): rooms. 
""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -126,6 +129,7 @@ class FederationHandler(BaseHandler): self.state_store = self.storage.state self.federation_client = hs.get_federation_client() self.state_handler = hs.get_state_handler() + self._state_resolution_handler = hs.get_state_resolution_handler() self.server_name = hs.hostname self.keyring = hs.get_keyring() self.action_generator = hs.get_action_generator() @@ -381,8 +385,7 @@ class FederationHandler(BaseHandler): event_map[x.event_id] = x room_version = await self.store.get_room_version_id(room_id) - state_map = await resolve_events_with_store( - self.clock, + state_map = await self._state_resolution_handler.resolve_events_with_store( room_id, room_version, state_maps, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 5a5ea39e01..98ede2ea4f 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -449,8 +449,7 @@ class StateHandler: state_map = {ev.event_id: ev for st in state_sets for ev in st} with Measure(self.clock, "state._resolve_events"): - new_state = await resolve_events_with_store( - self.clock, + new_state = await self._state_resolution_handler.resolve_events_with_store( event.room_id, room_version, state_set_ids, @@ -531,8 +530,7 @@ class StateResolutionHandler: state_groups_histogram.observe(len(state_groups_ids)) with Measure(self.clock, "state._resolve_events"): - new_state = await resolve_events_with_store( - self.clock, + new_state = await self.resolve_events_with_store( room_id, room_version, list(state_groups_ids.values()), @@ -552,6 +550,51 @@ class StateResolutionHandler: return cache + def resolve_events_with_store( + self, + room_id: str, + room_version: str, + state_sets: Sequence[StateMap[str]], + event_map: Optional[Dict[str, EventBase]], + state_res_store: "StateResolutionStore", + ) -> Awaitable[StateMap[str]]: + """ + Args: + room_id: the room we are working in + + room_version: Version of the room + + state_sets: List of dicts of (type, state_key) -> event_id, + which are the different state groups to resolve. + + event_map: + a dict from event_id to event, for any events that we happen to + have in flight (eg, those currently being persisted). This will be + used as a starting point fof finding the state we need; any missing + events will be requested via state_map_factory. + + If None, all events will be fetched via state_res_store. + + state_res_store: a place to fetch events from + + Returns: + a map from (type, state_key) to event_id. + """ + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) + def _make_state_cache_entry( new_state: StateMap[str], state_groups_ids: Dict[int, StateMap[str]] @@ -605,47 +648,6 @@ def _make_state_cache_entry( ) -def resolve_events_with_store( - clock: Clock, - room_id: str, - room_version: str, - state_sets: Sequence[StateMap[str]], - event_map: Optional[Dict[str, EventBase]], - state_res_store: "StateResolutionStore", -) -> Awaitable[StateMap[str]]: - """ - Args: - room_id: the room we are working in - - room_version: Version of the room - - state_sets: List of dicts of (type, state_key) -> event_id, - which are the different state groups to resolve. 
- - event_map: - a dict from event_id to event, for any events that we happen to - have in flight (eg, those currently being persisted). This will be - used as a starting point fof finding the state we need; any missing - events will be requested via state_map_factory. - - If None, all events will be fetched via state_res_store. - - state_res_store: a place to fetch events from - - Returns: - a map from (type, state_key) to event_id. - """ - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return v2.resolve_events_with_store( - clock, room_id, room_version, state_sets, event_map, state_res_store - ) - - @attr.s(slots=True) class StateResolutionStore: """Interface that allows state resolution algorithms to access the database From ba700074c673597d59d45565e26cf445f89faa57 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:04:52 +0100 Subject: [PATCH 068/134] Expose a `get_resource_usage` method in `Measure` --- synapse/util/metrics.py | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 6e57c1ee72..ffdea0de8d 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -19,7 +19,11 @@ from typing import Any, Callable, Optional, TypeVar, cast from prometheus_client import Counter -from synapse.logging.context import LoggingContext, current_context +from synapse.logging.context import ( + ContextResourceUsage, + LoggingContext, + current_context, +) from synapse.metrics import InFlightGauge logger = logging.getLogger(__name__) @@ -104,27 +108,27 @@ class Measure: def __init__(self, clock, name): self.clock = clock self.name = name - self._logging_context = None - self.start = None - - def __enter__(self): - if self._logging_context: - raise RuntimeError("Measure() objects cannot be re-used") - - self.start = self.clock.time() parent_context = current_context() self._logging_context = LoggingContext( "Measure[%s]" % (self.name,), parent_context ) + self.start = None + + def __enter__(self) -> "Measure": + if self.start is not None: + raise RuntimeError("Measure() objects cannot be re-used") + + self.start = self.clock.time() self._logging_context.__enter__() in_flight.register((self.name,), self._update_in_flight) + return self def __exit__(self, exc_type, exc_val, exc_tb): - if not self._logging_context: + if self.start is None: raise RuntimeError("Measure() block exited without being entered") duration = self.clock.time() - self.start - usage = self._logging_context.get_resource_usage() + usage = self.get_resource_usage() in_flight.unregister((self.name,), self._update_in_flight) self._logging_context.__exit__(exc_type, exc_val, exc_tb) @@ -140,6 +144,13 @@ class Measure: except ValueError: logger.warning("Failed to save metrics! Usage: %s", usage) + def get_resource_usage(self) -> ContextResourceUsage: + """Get the resources used within this Measure block + + If the Measure block is still active, returns the resource usage so far. 
+ """ + return self._logging_context.get_resource_usage() + def _update_in_flight(self, metrics): """Gets called when processing in flight metrics """ From 8412c08a87d35fc127f53063c8ede215237a042a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:07:09 +0100 Subject: [PATCH 069/134] Move Measure calls into `resolve_events_with_store` --- synapse/state/__init__.py | 63 +++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 98ede2ea4f..b99cf2d8cd 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -448,14 +448,13 @@ class StateHandler: state_map = {ev.event_id: ev for st in state_sets for ev in st} - with Measure(self.clock, "state._resolve_events"): - new_state = await self._state_resolution_handler.resolve_events_with_store( - event.room_id, - room_version, - state_set_ids, - event_map=state_map, - state_res_store=StateResolutionStore(self.store), - ) + new_state = await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version, + state_set_ids, + event_map=state_map, + state_res_store=StateResolutionStore(self.store), + ) return {key: state_map[ev_id] for key, ev_id in new_state.items()} @@ -529,14 +528,13 @@ class StateResolutionHandler: state_groups_histogram.observe(len(state_groups_ids)) - with Measure(self.clock, "state._resolve_events"): - new_state = await self.resolve_events_with_store( - room_id, - room_version, - list(state_groups_ids.values()), - event_map=event_map, - state_res_store=state_res_store, - ) + new_state = await self.resolve_events_with_store( + room_id, + room_version, + list(state_groups_ids.values()), + event_map=event_map, + state_res_store=state_res_store, + ) # if the new state matches any of the input state groups, we can # use that state group again. Otherwise we will generate a state_id @@ -550,14 +548,14 @@ class StateResolutionHandler: return cache - def resolve_events_with_store( + async def resolve_events_with_store( self, room_id: str, room_version: str, state_sets: Sequence[StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: "StateResolutionStore", - ) -> Awaitable[StateMap[str]]: + ) -> StateMap[str]: """ Args: room_id: the room we are working in @@ -580,20 +578,21 @@ class StateResolutionHandler: Returns: a map from (type, state_key) to event_id. 
""" - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return v2.resolve_events_with_store( - self.clock, - room_id, - room_version, - state_sets, - event_map, - state_res_store, - ) + with Measure(self.clock, "state._resolve_events"): + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return await v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return await v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) def _make_state_cache_entry( From 057f04fa9fb5134621dff19c758b38fe253ff8a8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 13:07:45 +0100 Subject: [PATCH 070/134] Report state res metrics to Prometheus and log --- synapse/state/__init__.py | 144 ++++++++++++++++++++++++++++++++------ 1 file changed, 124 insertions(+), 20 deletions(-) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b99cf2d8cd..31082bb16a 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -13,42 +13,46 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import heapq import logging -from collections import namedtuple +from collections import defaultdict, namedtuple from typing import ( + Any, Awaitable, + Callable, + DefaultDict, Dict, Iterable, List, Optional, Sequence, Set, + Tuple, Union, overload, ) import attr from frozendict import frozendict -from prometheus_client import Histogram +from prometheus_client import Counter, Histogram from typing_extensions import Literal from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events import EventBase from synapse.events.snapshot import EventContext +from synapse.logging.context import ContextResourceUsage from synapse.logging.utils import log_function from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo from synapse.types import Collection, StateMap -from synapse.util import Clock from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func logger = logging.getLogger(__name__) - +metrics_logger = logging.getLogger("synapse.state.metrics") # Metrics for number of state groups involved in a resolution. state_groups_histogram = Histogram( @@ -459,6 +463,33 @@ class StateHandler: return {key: state_map[ev_id] for key, ev_id in new_state.items()} +@attr.s(slots=True) +class _StateResMetrics: + """Keeps track of some usage metrics about state res.""" + + # System and User CPU time, in seconds + cpu_time = attr.ib(type=float, default=0.0) + + # time spent on database transactions (excluding scheduling time). This roughly + # corresponds to the amount of work done on the db server, excluding event fetches. + db_time = attr.ib(type=float, default=0.0) + + # number of events fetched from the db. 
+ db_events = attr.ib(type=int, default=0) + + +_biggest_room_by_cpu_counter = Counter( + "synapse_state_res_cpu_for_biggest_room_seconds", + "CPU time spent performing state resolution for the single most expensive " + "room for state resolution", +) +_biggest_room_by_db_counter = Counter( + "synapse_state_res_db_for_biggest_room_seconds", + "Database time spent performing state resolution for the single most " + "expensive room for state resolution", +) + + class StateResolutionHandler: """Responsible for doing state conflict resolution. @@ -481,6 +512,17 @@ class StateResolutionHandler: reset_expiry_on_get=True, ) + # + # stuff for tracking time spent on state-res by room + # + + # tracks the amount of work done on state res per room + self._state_res_metrics = defaultdict( + _StateResMetrics + ) # type: DefaultDict[str, _StateResMetrics] + + self.clock.looping_call(self._report_metrics, 120 * 1000) + @log_function async def resolve_state_groups( self, @@ -578,21 +620,83 @@ class StateResolutionHandler: Returns: a map from (type, state_key) to event_id. """ - with Measure(self.clock, "state._resolve_events"): - v = KNOWN_ROOM_VERSIONS[room_version] - if v.state_res == StateResolutionVersions.V1: - return await v1.resolve_events_with_store( - room_id, state_sets, event_map, state_res_store.get_events - ) - else: - return await v2.resolve_events_with_store( - self.clock, - room_id, - room_version, - state_sets, - event_map, - state_res_store, - ) + try: + with Measure(self.clock, "state._resolve_events") as m: + v = KNOWN_ROOM_VERSIONS[room_version] + if v.state_res == StateResolutionVersions.V1: + return await v1.resolve_events_with_store( + room_id, state_sets, event_map, state_res_store.get_events + ) + else: + return await v2.resolve_events_with_store( + self.clock, + room_id, + room_version, + state_sets, + event_map, + state_res_store, + ) + finally: + self._record_state_res_metrics(room_id, m.get_resource_usage()) + + def _record_state_res_metrics(self, room_id: str, rusage: ContextResourceUsage): + room_metrics = self._state_res_metrics[room_id] + room_metrics.cpu_time += rusage.ru_utime + rusage.ru_stime + room_metrics.db_time += rusage.db_txn_duration_sec + room_metrics.db_events += rusage.evt_db_fetch_count + + def _report_metrics(self): + if not self._state_res_metrics: + # no state res has happened since the last iteration: don't bother logging. + return + + self._report_biggest( + lambda i: i.cpu_time, "CPU time", _biggest_room_by_cpu_counter, + ) + + self._report_biggest( + lambda i: i.db_time, "DB time", _biggest_room_by_db_counter, + ) + + self._state_res_metrics.clear() + + def _report_biggest( + self, + extract_key: Callable[[_StateResMetrics], Any], + metric_name: str, + prometheus_counter_metric: Counter, + ) -> None: + """Report metrics on the biggest rooms for state res + + Args: + extract_key: a callable which, given a _StateResMetrics, extracts a single + metric to sort by. 
metric_name: the name of the metric we have extracted, for the log line
+            prometheus_counter_metric: a prometheus metric recording the sum of
+                the extracted metric
+        """
+        n_to_log = 10
+        if not metrics_logger.isEnabledFor(logging.DEBUG):
+            # only need the most expensive if we don't have debug logging, which
+            # allows nlargest() to degrade to max()
+            n_to_log = 1
+
+        items = self._state_res_metrics.items()
+
+        # log the N biggest rooms
+        biggest = heapq.nlargest(
+            n_to_log, items, key=lambda i: extract_key(i[1])
+        )  # type: List[Tuple[str, _StateResMetrics]]
+        metrics_logger.debug(
+            "%i biggest rooms for state-res by %s: %s",
+            len(biggest),
+            metric_name,
+            ["%s (%gs)" % (r, extract_key(m)) for (r, m) in biggest],
+        )
+
+        # report info on the single biggest to prometheus
+        _, biggest_metrics = biggest[0]
+        prometheus_counter_metric.inc(extract_key(biggest_metrics))
 
 
 def _make_state_cache_entry(

From d4274dd17e79296d7501aab19cf575f38501877f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Tue, 29 Sep 2020 13:08:56 +0100
Subject: [PATCH 071/134] changelog

---
 changelog.d/8420.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/8420.feature

diff --git a/changelog.d/8420.feature b/changelog.d/8420.feature
new file mode 100644
index 0000000000..9d6849624d
--- /dev/null
+++ b/changelog.d/8420.feature
@@ -0,0 +1 @@
+Add experimental reporting of metrics on expensive rooms for state-resolution.

From 8238b55e08e8fbd7c7169b72281538a3e34c6488 Mon Sep 17 00:00:00 2001
From: Aaron Raimist
Date: Tue, 29 Sep 2020 12:50:25 -0500
Subject: [PATCH 072/134] Update description of server_name config option
 (#8415)

---
 changelog.d/8415.doc     |  1 +
 docs/sample_config.yaml  | 21 +++++++++++++++++----
 synapse/config/server.py | 21 +++++++++++++++++----
 3 files changed, 35 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/8415.doc

diff --git a/changelog.d/8415.doc b/changelog.d/8415.doc
new file mode 100644
index 0000000000..28b5798533
--- /dev/null
+++ b/changelog.d/8415.doc
@@ -0,0 +1 @@
+Improve description of `server_name` config option in `homeserver.yaml`.
\ No newline at end of file
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 845f537795..70cc06a6d8 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -33,10 +33,23 @@
 ## Server ##
 
-# The domain name of the server, with optional explicit port.
-# This is used by remote servers to connect to this server,
-# e.g. matrix.org, localhost:8080, etc.
-# This is also the last part of your UserID.
+# The public-facing domain of the server
+#
+# The server_name name will appear at the end of usernames and room addresses
+# created on this server. For example if the server_name was example.com,
+# usernames on this server would be in the format @user:example.com
+#
+# In most cases you should avoid using a matrix specific subdomain such as
+# matrix.example.com or synapse.example.com as the server_name for the same
+# reasons you wouldn't use user@email.example.com as your email address.
+# See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md
+# for information on how to host Synapse on a subdomain while preserving
+# a clean server_name.
+#
+# The server_name cannot be changed later so it is important to
+# configure this correctly before you start Synapse. It should be all
+# lowercase and may contain an explicit port.
+# Examples: matrix.org, localhost:8080 # server_name: "SERVERNAME" diff --git a/synapse/config/server.py b/synapse/config/server.py index 532b910470..ef6d70e3f8 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -641,10 +641,23 @@ class ServerConfig(Config): """\ ## Server ## - # The domain name of the server, with optional explicit port. - # This is used by remote servers to connect to this server, - # e.g. matrix.org, localhost:8080, etc. - # This is also the last part of your UserID. + # The public-facing domain of the server + # + # The server_name name will appear at the end of usernames and room addresses + # created on this server. For example if the server_name was example.com, + # usernames on this server would be in the format @user:example.com + # + # In most cases you should avoid using a matrix specific subdomain such as + # matrix.example.com or synapse.example.com as the server_name for the same + # reasons you wouldn't use user@email.example.com as your email address. + # See https://github.com/matrix-org/synapse/blob/master/docs/delegate.md + # for information on how to host Synapse on a subdomain while preserving + # a clean server_name. + # + # The server_name cannot be changed later so it is important to + # configure this correctly before you start Synapse. It should be all + # lowercase and may contain an explicit port. + # Examples: matrix.org, localhost:8080 # server_name: "%(server_name)s" From ea70f1c362dc4bd6c0f8a67e16ed0971fe095e5b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Sep 2020 21:48:33 +0100 Subject: [PATCH 073/134] Various clean ups to room stream tokens. (#8423) --- changelog.d/8423.misc | 1 + synapse/events/__init__.py | 6 +-- synapse/handlers/admin.py | 2 +- synapse/handlers/device.py | 4 +- synapse/handlers/initial_sync.py | 3 +- synapse/handlers/pagination.py | 5 +-- synapse/handlers/room.py | 4 +- synapse/handlers/sync.py | 20 ++++++--- synapse/notifier.py | 4 +- synapse/replication/tcp/client.py | 6 +-- synapse/rest/admin/__init__.py | 3 +- synapse/storage/databases/main/stream.py | 38 +++++++++-------- synapse/storage/persist_events.py | 5 +-- synapse/types.py | 53 +++++++++++++++--------- tests/rest/client/v1/test_rooms.py | 8 ++-- tests/storage/test_purge.py | 10 ++--- 16 files changed, 96 insertions(+), 76 deletions(-) create mode 100644 changelog.d/8423.misc diff --git a/changelog.d/8423.misc b/changelog.d/8423.misc new file mode 100644 index 0000000000..7260e3fa41 --- /dev/null +++ b/changelog.d/8423.misc @@ -0,0 +1 @@ +Various refactors to simplify stream token handling. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index bf800a3852..dc49df0812 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -23,7 +23,7 @@ from typing import Dict, Optional, Tuple, Type from unpaddedbase64 import encode_base64 from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions -from synapse.types import JsonDict +from synapse.types import JsonDict, RoomStreamToken from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -118,8 +118,8 @@ class _EventInternalMetadata: # XXX: These are set by StreamWorkerStore._set_before_and_after. 
# I'm pretty sure that these are never persisted to the database, so shouldn't # be here - before = DictProperty("before") # type: str - after = DictProperty("after") # type: str + before = DictProperty("before") # type: RoomStreamToken + after = DictProperty("after") # type: RoomStreamToken order = DictProperty("order") # type: Tuple[int, int] def get_dict(self) -> JsonDict: diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index dd981c597e..1ce2091b46 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -153,7 +153,7 @@ class AdminHandler(BaseHandler): if not events: break - from_key = RoomStreamToken.parse(events[-1].internal_metadata.after) + from_key = events[-1].internal_metadata.after events = await filter_events_for_client(self.storage, user_id, events) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 4149520d6c..b9d9098104 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -29,7 +29,6 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( - RoomStreamToken, StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -113,8 +112,7 @@ class DeviceWorkerHandler(BaseHandler): set_tag("user_id", user_id) set_tag("from_token", from_token) - now_room_id = self.store.get_room_max_stream_ordering() - now_room_key = RoomStreamToken(None, now_room_id) + now_room_key = self.store.get_room_max_token() room_ids = await self.store.get_rooms_for_user(user_id) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 8cd7eb22a3..43f15435de 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -325,7 +325,8 @@ class InitialSyncHandler(BaseHandler): if limit is None: limit = 10 - stream_token = await self.store.get_stream_token_for_event(member_event_id) + leave_position = await self.store.get_position_for_event(member_event_id) + stream_token = leave_position.to_room_stream_token() messages, token = await self.store.get_recent_events_for_room( room_id, limit=limit, end_token=stream_token diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a0b3bdb5e0..d6779a4b44 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -25,7 +25,7 @@ from synapse.logging.context import run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig -from synapse.types import Requester, RoomStreamToken +from synapse.types import Requester from synapse.util.async_helpers import ReadWriteLock from synapse.util.stringutils import random_string from synapse.visibility import filter_events_for_client @@ -373,10 +373,9 @@ class PaginationHandler: # case "JOIN" would have been returned. 
assert member_event_id - leave_token_str = await self.store.get_topological_token_for_event( + leave_token = await self.store.get_topological_token_for_event( member_event_id ) - leave_token = RoomStreamToken.parse(leave_token_str) assert leave_token.topological is not None if leave_token.topological < curr_topo: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 11bf146bed..836b3f381a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1134,14 +1134,14 @@ class RoomEventSource: events[:] = events[:limit] if events: - end_key = RoomStreamToken.parse(events[-1].internal_metadata.after) + end_key = events[-1].internal_metadata.after else: end_key = to_key return (events, end_key) def get_current_key(self) -> RoomStreamToken: - return RoomStreamToken(None, self.store.get_room_max_stream_ordering()) + return self.store.get_room_max_token() def get_current_key_for_room(self, room_id: str) -> Awaitable[str]: return self.store.get_room_events_max_id(room_id) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index e948efef2e..bfe2583002 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -519,7 +519,7 @@ class SyncHandler: if len(recents) > timeline_limit: limited = True recents = recents[-timeline_limit:] - room_key = RoomStreamToken.parse(recents[0].internal_metadata.before) + room_key = recents[0].internal_metadata.before prev_batch_token = now_token.copy_and_replace("room_key", room_key) @@ -1595,16 +1595,24 @@ class SyncHandler: if leave_events: leave_event = leave_events[-1] - leave_stream_token = await self.store.get_stream_token_for_event( + leave_position = await self.store.get_position_for_event( leave_event.event_id ) - leave_token = since_token.copy_and_replace( - "room_key", leave_stream_token - ) - if since_token and since_token.is_after(leave_token): + # If the leave event happened before the since token then we + # bail. + if since_token and not leave_position.persisted_after( + since_token.room_key + ): continue + # We can safely convert the position of the leave event into a + # stream token as it'll only be used in the context of this + # room. (c.f. the docstring of `to_room_stream_token`). + leave_token = since_token.copy_and_replace( + "room_key", leave_position.to_room_stream_token() + ) + # If this is an out of band message, like a remote invite # rejection, we include it in the recents batch. Otherwise, we # let _load_filtered_recents handle fetching the correct diff --git a/synapse/notifier.py b/synapse/notifier.py index 441b3d15e2..59415f6f88 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -163,7 +163,7 @@ class _NotifierUserStream: """ # Immediately wake up stream if something has already since happened # since their last token. 
-        if self.last_notified_token.is_after(token):
+        if self.last_notified_token != token:
             return _NotificationListener(defer.succeed(self.current_token))
         else:
             return _NotificationListener(self.notify_deferred.observe())
@@ -470,7 +470,7 @@ class Notifier:
         async def check_for_updates(
             before_token: StreamToken, after_token: StreamToken
         ) -> EventStreamResult:
-            if not after_token.is_after(before_token):
+            if after_token == before_token:
                 return EventStreamResult([], (from_token, from_token))

             events = []  # type: List[EventBase]
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index 55af3d41ea..e165429cad 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -29,7 +29,7 @@ from synapse.replication.tcp.streams.events import (
     EventsStreamEventRow,
     EventsStreamRow,
 )
-from synapse.types import PersistedEventPosition, RoomStreamToken, UserID
+from synapse.types import PersistedEventPosition, UserID
 from synapse.util.async_helpers import timeout_deferred
 from synapse.util.metrics import Measure
@@ -152,9 +152,7 @@ class ReplicationDataHandler:
             if event.type == EventTypes.Member:
                 extra_users = (UserID.from_string(event.state_key),)

-            max_token = RoomStreamToken(
-                None, self.store.get_room_max_stream_ordering()
-            )
+            max_token = self.store.get_room_max_token()
             event_pos = PersistedEventPosition(instance_name, token)
             self.notifier.on_new_room_event(
                 event, event_pos, max_token, extra_users
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 5c5f00b213..ba53f66f02 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -109,7 +109,8 @@ class PurgeHistoryRestServlet(RestServlet):
             if event.room_id != room_id:
                 raise SynapseError(400, "Event is for wrong room.")

-            token = await self.store.get_topological_token_for_event(event_id)
+            room_token = await self.store.get_topological_token_for_event(event_id)
+            token = str(room_token)

             logger.info("[purge] purging up to token %s (event_id %s)", token, event_id)
         elif "purge_up_to_ts" in body:
diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py
index 92e96468b4..37249f1e3f 100644
--- a/synapse/storage/databases/main/stream.py
+++ b/synapse/storage/databases/main/stream.py
@@ -35,7 +35,6 @@ what sort order was used:

    - topological tokens: "t%d-%d", where the integers map to the topological
      and stream ordering columns respectively.
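
    For example (illustrative values): an event at stream ordering 2633508
    would have the stream token "s2633508", and the topological token
    "t426-2633508" if its topological ordering were 426.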
""" - import abc import logging from collections import namedtuple @@ -54,7 +53,7 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine -from synapse.types import Collection, RoomStreamToken +from synapse.types import Collection, PersistedEventPosition, RoomStreamToken from synapse.util.caches.stream_change_cache import StreamChangeCache if TYPE_CHECKING: @@ -305,6 +304,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): def get_room_min_stream_ordering(self) -> int: raise NotImplementedError() + def get_room_max_token(self) -> RoomStreamToken: + return RoomStreamToken(None, self.get_room_max_stream_ordering()) + async def get_room_events_stream_for_rooms( self, room_ids: Collection[str], @@ -611,26 +613,28 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): allow_none=allow_none, ) - async def get_stream_token_for_event(self, event_id: str) -> RoomStreamToken: - """The stream token for an event - Args: - event_id: The id of the event to look up a stream token for. - Raises: - StoreError if the event wasn't in the database. - Returns: - A stream token. + async def get_position_for_event(self, event_id: str) -> PersistedEventPosition: + """Get the persisted position for an event """ - stream_id = await self.get_stream_id_for_event(event_id) - return RoomStreamToken(None, stream_id) + row = await self.db_pool.simple_select_one( + table="events", + keyvalues={"event_id": event_id}, + retcols=("stream_ordering", "instance_name"), + desc="get_position_for_event", + ) - async def get_topological_token_for_event(self, event_id: str) -> str: + return PersistedEventPosition( + row["instance_name"] or "master", row["stream_ordering"] + ) + + async def get_topological_token_for_event(self, event_id: str) -> RoomStreamToken: """The stream token for an event Args: event_id: The id of the event to look up a stream token for. Raises: StoreError if the event wasn't in the database. Returns: - A "t%d-%d" topological token. + A `RoomStreamToken` topological token. 
""" row = await self.db_pool.simple_select_one( table="events", @@ -638,7 +642,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): retcols=("stream_ordering", "topological_ordering"), desc="get_topological_token_for_event", ) - return "t%d-%d" % (row["topological_ordering"], row["stream_ordering"]) + return RoomStreamToken(row["topological_ordering"], row["stream_ordering"]) async def get_current_topological_token(self, room_id: str, stream_key: int) -> int: """Gets the topological token in a room after or at the given stream @@ -687,8 +691,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): else: topo = None internal = event.internal_metadata - internal.before = str(RoomStreamToken(topo, stream - 1)) - internal.after = str(RoomStreamToken(topo, stream)) + internal.before = RoomStreamToken(topo, stream - 1) + internal.after = RoomStreamToken(topo, stream) internal.order = (int(topo) if topo else 0, int(stream)) async def get_events_around( diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index ded6cf9655..72939f3984 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -229,7 +229,7 @@ class EventsPersistenceStorage: defer.gatherResults(deferreds, consumeErrors=True) ) - return RoomStreamToken(None, self.main_store.get_current_events_token()) + return self.main_store.get_room_max_token() async def persist_event( self, event: EventBase, context: EventContext, backfilled: bool = False @@ -247,11 +247,10 @@ class EventsPersistenceStorage: await make_deferred_yieldable(deferred) - max_persisted_id = self.main_store.get_current_events_token() event_stream_id = event.internal_metadata.stream_ordering pos = PersistedEventPosition(self._instance_name, event_stream_id) - return pos, RoomStreamToken(None, max_persisted_id) + return pos, self.main_store.get_room_max_token() def _maybe_start_persisting(self, room_id: str): async def persisting_queue(item): diff --git a/synapse/types.py b/synapse/types.py index ec39f9e1e8..02bcc197ec 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -413,6 +413,18 @@ class RoomStreamToken: pass raise SynapseError(400, "Invalid token %r" % (string,)) + def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken": + """Return a new token such that if an event is after both this token and + the other token, then its after the returned token too. 
+        """
+
+        if self.topological or other.topological:
+            raise Exception("Can't advance topological tokens")
+
+        max_stream = max(self.stream, other.stream)
+
+        return RoomStreamToken(None, max_stream)
+
     def as_tuple(self) -> Tuple[Optional[int], int]:
         return (self.topological, self.stream)
@@ -458,31 +470,20 @@ class StreamToken:
     def room_stream_id(self):
         return self.room_key.stream

-    def is_after(self, other):
-        """Does this token contain events that the other doesn't?"""
-        return (
-            (other.room_stream_id < self.room_stream_id)
-            or (int(other.presence_key) < int(self.presence_key))
-            or (int(other.typing_key) < int(self.typing_key))
-            or (int(other.receipt_key) < int(self.receipt_key))
-            or (int(other.account_data_key) < int(self.account_data_key))
-            or (int(other.push_rules_key) < int(self.push_rules_key))
-            or (int(other.to_device_key) < int(self.to_device_key))
-            or (int(other.device_list_key) < int(self.device_list_key))
-            or (int(other.groups_key) < int(self.groups_key))
-        )
-
     def copy_and_advance(self, key, new_value) -> "StreamToken":
         """Advance the given key in the token to a new value if and only if the
         new value is after the old value.
         """
-        new_token = self.copy_and_replace(key, new_value)
         if key == "room_key":
-            new_id = new_token.room_stream_id
-            old_id = self.room_stream_id
-        else:
-            new_id = int(getattr(new_token, key))
-            old_id = int(getattr(self, key))
+            new_token = self.copy_and_replace(
+                "room_key", self.room_key.copy_and_advance(new_value)
+            )
+            return new_token
+
+        new_token = self.copy_and_replace(key, new_value)
+        new_id = int(getattr(new_token, key))
+        old_id = int(getattr(self, key))
+
         if old_id < new_id:
             return new_token
         else:
@@ -509,6 +510,18 @@ class PersistedEventPosition:
     def persisted_after(self, token: RoomStreamToken) -> bool:
         return token.stream < self.stream

+    def to_room_stream_token(self) -> RoomStreamToken:
+        """Converts the position to a room stream token such that events
+        persisted in the same room after this position will be after the
+        returned `RoomStreamToken`.
+
+        Note: no guarantees are made about ordering w.r.t. events in other
+        rooms.
+        """
+        # Doing the naive thing satisfies the desired properties described in
+        # the docstring.
+        return RoomStreamToken(None, self.stream)
+

 class ThirdPartyInstanceID(
     namedtuple("ThirdPartyInstanceID", ("appservice_id", "network_id"))
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 0a567b032f..a3287011e9 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -902,15 +902,15 @@ class RoomMessageListTestCase(RoomBase):

         # Send a first message in the room, which will be removed by the purge.
         first_event_id = self.helper.send(self.room_id, "message 1")["event_id"]

-        first_token = self.get_success(
-            store.get_topological_token_for_event(first_event_id)
+        first_token = str(
+            self.get_success(store.get_topological_token_for_event(first_event_id))
         )

         # Send a second message in the room, which won't be removed, and which we'll
         # use as the marker to purge events before.
second_event_id = self.helper.send(self.room_id, "message 2")["event_id"] - second_token = self.get_success( - store.get_topological_token_for_event(second_event_id) + second_token = str( + self.get_success(store.get_topological_token_for_event(second_event_id)) ) # Send a third event in the room to ensure we don't fall under any edge case diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 918387733b..723cd28933 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -47,8 +47,8 @@ class PurgeTests(HomeserverTestCase): storage = self.hs.get_storage() # Get the topological token - event = self.get_success( - store.get_topological_token_for_event(last["event_id"]) + event = str( + self.get_success(store.get_topological_token_for_event(last["event_id"])) ) # Purge everything before this topological token @@ -74,12 +74,10 @@ class PurgeTests(HomeserverTestCase): storage = self.hs.get_datastore() # Set the topological token higher than it should be - event = self.get_success( + token = self.get_success( storage.get_topological_token_for_event(last["event_id"]) ) - event = "t{}-{}".format( - *list(map(lambda x: x + 1, map(int, event[1:].split("-")))) - ) + event = "t{}-{}".format(token.topological + 1, token.stream + 1) # Purge everything before this topological token purge = defer.ensureDeferred(storage.purge_history(self.room_id, event, True)) From ceafb5a1c61f699d659b1b38577b1c2264721e28 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 30 Sep 2020 16:42:05 +0100 Subject: [PATCH 074/134] Drop support for ancient prometheus_client (#8426) Drop compatibility hacks for prometheus-client pre 0.4.0. Debian stretch and Fedora 31 both have newer versions, so hopefully this will be ok. --- changelog.d/8426.removal | 1 + synapse/metrics/_exposition.py | 24 ++---------------------- synapse/python_dependencies.py | 6 +++++- 3 files changed, 8 insertions(+), 23 deletions(-) create mode 100644 changelog.d/8426.removal diff --git a/changelog.d/8426.removal b/changelog.d/8426.removal new file mode 100644 index 0000000000..a56277fe7a --- /dev/null +++ b/changelog.d/8426.removal @@ -0,0 +1 @@ +Drop support for `prometheus_client` older than 0.4.0. diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 4304c60d56..c6457ba450 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -24,7 +24,6 @@ expect, and the newer "best practice" version of the up-to-date official client. import math import threading -from collections import namedtuple from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn from urllib.parse import parse_qs, urlparse @@ -35,14 +34,6 @@ from twisted.web.resource import Resource from synapse.util import caches -try: - from prometheus_client.samples import Sample -except ImportError: - Sample = namedtuple( # type: ignore[no-redef] # noqa - "Sample", ["name", "labels", "value", "timestamp", "exemplar"] - ) - - CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") @@ -93,17 +84,6 @@ def sample_line(line, name): ) -def nameify_sample(sample): - """ - If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a - namedtuple which has the names we expect. 
- """ - if not isinstance(sample, Sample): - sample = Sample(*sample, None, None) - - return sample - - def generate_latest(registry, emit_help=False): # Trigger the cache metrics to be rescraped, which updates the common @@ -144,7 +124,7 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mname, mtype)) - for sample in map(nameify_sample, metric.samples): + for sample in metric.samples: # Get rid of the OpenMetrics specific samples for suffix in ["_created", "_gsum", "_gcount"]: if sample.name.endswith(suffix): @@ -172,7 +152,7 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mnewname, mtype)) - for sample in map(nameify_sample, metric.samples): + for sample in metric.samples: # Get rid of the OpenMetrics specific samples for suffix in ["_created", "_gsum", "_gcount"]: if sample.name.endswith(suffix): diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 288631477e..0ddead8a0f 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -68,7 +68,11 @@ REQUIREMENTS = [ "pymacaroons>=0.13.0", "msgpack>=0.5.2", "phonenumbers>=8.2.0", - "prometheus_client>=0.0.18,<0.9.0", + # we use GaugeHistogramMetric, which was added in prom-client 0.4.0. + # prom-client has a history of breaking backwards compatibility between + # minor versions (https://github.com/prometheus/client_python/issues/317), + # so we also pin the minor version. + "prometheus_client>=0.4.0,<0.9.0", # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note: # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33 # is out in November.) From 1c8ca2c54363dc09744f9618f30181f015e63ffe Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 30 Sep 2020 16:44:10 +0100 Subject: [PATCH 075/134] Fix _exposition.py to stop stripping samples Our hacked-up `_exposition.py` was stripping out some samples it shouldn't have been. Put them back in, to more closely match the upstream `exposition.py`. --- synapse/metrics/_exposition.py | 40 ++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index c6457ba450..734271e765 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -26,6 +26,7 @@ import math import threading from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn +from typing import Dict, List from urllib.parse import parse_qs, urlparse from prometheus_client import REGISTRY @@ -124,16 +125,33 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mname, mtype)) - for sample in metric.samples: - # Get rid of the OpenMetrics specific samples + + om_samples = {} # type: Dict[str, List[str]] + for s in metric.samples: for suffix in ["_created", "_gsum", "_gcount"]: - if sample.name.endswith(suffix): + if s.name == metric.name + suffix: + # OpenMetrics specific sample, put in a gauge at the end. 
+ # (these come from gaugehistograms which don't get renamed, + # so no need to faff with mnewname) + om_samples.setdefault(suffix, []).append(sample_line(s, s.name)) break else: - newname = sample.name.replace(mnewname, mname) + newname = s.name.replace(mnewname, mname) if ":" in newname and newname.endswith("_total"): newname = newname[: -len("_total")] - output.append(sample_line(sample, newname)) + output.append(sample_line(s, newname)) + + for suffix, lines in sorted(om_samples.items()): + if emit_help: + output.append( + "# HELP {0}{1} {2}\n".format( + metric.name, + suffix, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append("# TYPE {0}{1} gauge\n".format(metric.name, suffix)) + output.extend(lines) # Get rid of the weird colon things while we're at it if mtype == "counter": @@ -152,16 +170,16 @@ def generate_latest(registry, emit_help=False): ) ) output.append("# TYPE {0} {1}\n".format(mnewname, mtype)) - for sample in metric.samples: - # Get rid of the OpenMetrics specific samples + + for s in metric.samples: + # Get rid of the OpenMetrics specific samples (we should already have + # dealt with them above anyway.) for suffix in ["_created", "_gsum", "_gcount"]: - if sample.name.endswith(suffix): + if s.name == metric.name + suffix: break else: output.append( - sample_line( - sample, sample.name.replace(":total", "").replace(":", "_") - ) + sample_line(s, s.name.replace(":total", "").replace(":", "_")) ) return "".join(output).encode("utf-8") From 6d2d42f8fb04599713d3e6e7fc3bc4c9b7063c9a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:26:28 +0100 Subject: [PATCH 076/134] Rewrite BucketCollector This was a bit unweildy for what I wanted: in particular, I wanted to assign each measurement straight into a bucket, rather than storing an intermediate Counter which didn't do any bucketing at all. I've replaced it with something that is hopefully a bit easier to use. (I'm not entirely sure what the difference between a HistogramMetricFamily and a GaugeHistogramMetricFamily is, but given our counters can go down as well as up the latter *sounds* more accurate?) --- synapse/metrics/__init__.py | 113 +++++++++++++--------- synapse/storage/databases/main/metrics.py | 26 ++--- tests/storage/test_event_metrics.py | 19 ++-- 3 files changed, 88 insertions(+), 70 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index a1f7ca3449..b8d2a8e8a9 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -15,6 +15,7 @@ import functools import gc +import itertools import logging import os import platform @@ -27,8 +28,8 @@ from prometheus_client import Counter, Gauge, Histogram from prometheus_client.core import ( REGISTRY, CounterMetricFamily, + GaugeHistogramMetricFamily, GaugeMetricFamily, - HistogramMetricFamily, ) from twisted.internet import reactor @@ -46,7 +47,7 @@ logger = logging.getLogger(__name__) METRICS_PREFIX = "/_synapse/metrics" running_on_pypy = platform.python_implementation() == "PyPy" -all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge, BucketCollector]] +all_gauges = {} # type: Dict[str, Union[LaterGauge, InFlightGauge]] HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") @@ -205,63 +206,83 @@ class InFlightGauge: all_gauges[self.name] = self -@attr.s(slots=True, hash=True) -class BucketCollector: - """ - Like a Histogram, but allows buckets to be point-in-time instead of - incrementally added to. 
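As a usage sketch (illustrative only: the metric name and values below are made
up, but the constructor and `update_data` signatures match the new class that
follows):

```python
from synapse.metrics import GaugeBucketCollector

# Bucket bounds are upper bounds; "+Inf" is appended automatically if absent.
# Constructing the collector also registers it with the default registry.
collector = GaugeBucketCollector(
    "synapse_example_sizes",  # hypothetical metric name
    "An example gauge-histogram metric",
    buckets=[1, 10, 100],
)

# Each call replaces the previously reported data wholesale, rather than
# incrementing counters: 0.5 lands in the le=1 bucket, 7 in le=10, and
# 250 in the implicit le=+Inf bucket.
collector.update_data([0.5, 7, 250])
```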
+class GaugeBucketCollector: + """Like a Histogram, but the buckets are Gauges which are updated atomically. - Args: - name (str): Base name of metric to be exported to Prometheus. - data_collector (callable -> dict): A synchronous callable that - returns a dict mapping bucket to number of items in the - bucket. If these buckets are not the same as the buckets - given to this class, they will be remapped into them. - buckets (list[float]): List of floats/ints of the buckets to - give to Prometheus. +Inf is ignored, if given. + The data is updated by calling `update_data` with an iterable of measurements. + We assume that the data is updated less frequently than it is reported to + Prometheus, and optimise for that case. """ - name = attr.ib() - data_collector = attr.ib() - buckets = attr.ib() + __slots__ = ("_name", "_documentation", "_bucket_bounds", "_metric") + + def __init__( + self, + name: str, + documentation: str, + buckets: Iterable[float], + registry=REGISTRY, + ): + """ + Args: + name: base name of metric to be exported to Prometheus. (a _bucket suffix + will be added.) + documentation: help text for the metric + buckets: The top bounds of the buckets to report + registry: metric registry to register with + """ + self._name = name + self._documentation = documentation + + # the tops of the buckets + self._bucket_bounds = [float(b) for b in buckets] + if self._bucket_bounds != sorted(self._bucket_bounds): + raise ValueError("Buckets not in sorted order") + + if self._bucket_bounds[-1] != float("inf"): + self._bucket_bounds.append(float("inf")) + + self._metric = self._values_to_metric([]) + registry.register(self) def collect(self): + yield self._metric - # Fetch the data -- this must be synchronous! - data = self.data_collector() + def update_data(self, values: Iterable[float]): + """Update the data to be reported by the metric - buckets = {} # type: Dict[float, int] + The existing data is cleared, and each measurement in the input is assigned + to the relevant bucket. + """ + self._metric = self._values_to_metric(values) - res = [] - for x in data.keys(): - for i, bound in enumerate(self.buckets): - if x <= bound: - buckets[bound] = buckets.get(bound, 0) + data[x] + def _values_to_metric(self, values: Iterable[float]) -> GaugeHistogramMetricFamily: + total = 0.0 + bucket_values = [0 for _ in self._bucket_bounds] - for i in self.buckets: - res.append([str(i), buckets.get(i, 0)]) + for v in values: + # assign each value to a bucket + for i, bound in enumerate(self._bucket_bounds): + if v <= bound: + bucket_values[i] += 1 + break - res.append(["+Inf", sum(data.values())]) + # ... and increment the sum + total += v - metric = HistogramMetricFamily( - self.name, "", buckets=res, sum_value=sum(x * y for x, y in data.items()) + # now, aggregate the bucket values so that they count the number of entries in + # that bucket or below. 
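+        # (For example, bucket_values of [1, 1, 0, 1] accumulates to
+        # (1, 2, 2, 3), so each bucket counts every value at or below its
+        # upper bound.)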
+ accumulated_values = itertools.accumulate(bucket_values) + + return GaugeHistogramMetricFamily( + self._name, + self._documentation, + buckets=list( + zip((str(b) for b in self._bucket_bounds), accumulated_values) + ), + gsum_value=total, ) - yield metric - - def __attrs_post_init__(self): - self.buckets = [float(x) for x in self.buckets if x != "+Inf"] - if self.buckets != sorted(self.buckets): - raise ValueError("Buckets not sorted") - - self.buckets = tuple(self.buckets) - - if self.name in all_gauges.keys(): - logger.warning("%s already registered, reregistering" % (self.name,)) - REGISTRY.unregister(all_gauges.pop(self.name)) - - REGISTRY.register(self) - all_gauges[self.name] = self # diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 686052bd83..4efc093b9e 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -12,10 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import typing -from collections import Counter -from synapse.metrics import BucketCollector +from synapse.metrics import GaugeBucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool @@ -23,6 +21,14 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) +# Collect metrics on the number of forward extremities that exist. +_extremities_collecter = GaugeBucketCollector( + "synapse_forward_extremities", + "Number of rooms on the server with the given number of forward extremities" + " or fewer", + buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500], +) + class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """Functions to pull various metrics from the DB, for e.g. phone home @@ -32,18 +38,6 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - # Collect metrics on the number of forward extremities that exist. 
- # Counter of number of extremities to count - self._current_forward_extremities_amount = ( - Counter() - ) # type: typing.Counter[int] - - BucketCollector( - "synapse_forward_extremities", - lambda: self._current_forward_extremities_amount, - buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"], - ) - # Read the extrems every 60 minutes def read_forward_extremities(): # run as a background process to make sure that the database transactions @@ -65,7 +59,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): return txn.fetchall() res = await self.db_pool.runInteraction("read_forward_extremities", fetch) - self._current_forward_extremities_amount = Counter([x[0] for x in res]) + _extremities_collecter.update_data(x[0] for x in res) async def count_daily_messages(self): """ diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 949846fe33..3957471f3f 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -52,14 +52,14 @@ class ExtremStatisticsTestCase(HomeserverTestCase): self.reactor.advance(60 * 60 * 1000) self.pump(1) - items = set( + items = list( filter( lambda x: b"synapse_forward_extremities_" in x, - generate_latest(REGISTRY).split(b"\n"), + generate_latest(REGISTRY, emit_help=False).split(b"\n"), ) ) - expected = { + expected = [ b'synapse_forward_extremities_bucket{le="1.0"} 0.0', b'synapse_forward_extremities_bucket{le="2.0"} 2.0', b'synapse_forward_extremities_bucket{le="3.0"} 2.0', @@ -72,9 +72,12 @@ class ExtremStatisticsTestCase(HomeserverTestCase): b'synapse_forward_extremities_bucket{le="100.0"} 3.0', b'synapse_forward_extremities_bucket{le="200.0"} 3.0', b'synapse_forward_extremities_bucket{le="500.0"} 3.0', - b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', - b"synapse_forward_extremities_count 3.0", - b"synapse_forward_extremities_sum 10.0", - } - + # per https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.wghdjzzh72j9, + # "inf" is valid: "this includes variants such as inf" + b'synapse_forward_extremities_bucket{le="inf"} 3.0', + b"# TYPE synapse_forward_extremities_gcount gauge", + b"synapse_forward_extremities_gcount 3.0", + b"# TYPE synapse_forward_extremities_gsum gauge", + b"synapse_forward_extremities_gsum 10.0", + ] self.assertEqual(items, expected) From 20e7c4de262746479000ec507b7a3c37f1779a60 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:30:00 +0100 Subject: [PATCH 077/134] Add an improved "forward extremities" metric Hopefully, N(extremities) * N(state_events) is a more realistic approximation to "how big a problem is this room?". --- synapse/storage/databases/main/metrics.py | 27 +++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 4efc093b9e..92099f95ce 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -29,6 +29,18 @@ _extremities_collecter = GaugeBucketCollector( buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500], ) +# we also expose metrics on the "number of excess extremity events", which is +# (E-1)*N, where E is the number of extremities and N is the number of state +# events in the room. This is an approximation to the number of state events +# we could remove from state resolution by reducing the graph to a single +# forward extremity. 
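+# For example (illustrative figures): a room with E = 5 forward extremities
+# and N = 200 state events contributes (5 - 1) * 200 = 800 excess extremity
+# events, while a room already reduced to a single forward extremity
+# contributes 0.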
+_excess_state_events_collecter = GaugeBucketCollector( + "synapse_excess_extremity_events", + "Number of rooms on the server with the given number of excess extremity " + "events, or fewer", + buckets=[0] + [1 << n for n in range(12)], +) + class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): """Functions to pull various metrics from the DB, for e.g. phone home @@ -52,15 +64,26 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): def fetch(txn): txn.execute( """ - select count(*) c from event_forward_extremities - group by room_id + SELECT t1.c, t2.c + FROM ( + SELECT room_id, COUNT(*) c FROM event_forward_extremities + GROUP BY room_id + ) t1 LEFT JOIN ( + SELECT room_id, COUNT(*) c FROM current_state_events + GROUP BY room_id + ) t2 ON t1.room_id = t2.room_id """ ) return txn.fetchall() res = await self.db_pool.runInteraction("read_forward_extremities", fetch) + _extremities_collecter.update_data(x[0] for x in res) + _excess_state_events_collecter.update_data( + (x[0] - 1) * x[1] for x in res if x[1] + ) + async def count_daily_messages(self): """ Returns an estimate of the number of messages sent in the last day. From 32acab3fa2f81890787b4fddab78a1d71e47ea94 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 29 Sep 2020 22:31:45 +0100 Subject: [PATCH 078/134] changelog --- changelog.d/8425.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8425.feature diff --git a/changelog.d/8425.feature b/changelog.d/8425.feature new file mode 100644 index 0000000000..b4ee5bb74b --- /dev/null +++ b/changelog.d/8425.feature @@ -0,0 +1 @@ +Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. From 8b40843392e2df80d4f1108295ae6acd972100b0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 30 Sep 2020 13:02:43 -0400 Subject: [PATCH 079/134] Allow additional SSO properties to be passed to the client (#8413) --- changelog.d/8413.feature | 1 + docs/sample_config.yaml | 8 ++ docs/sso_mapping_providers.md | 14 ++- docs/workers.md | 16 ++++ synapse/config/oidc_config.py | 8 ++ synapse/handlers/auth.py | 60 +++++++++++- synapse/handlers/oidc_handler.py | 56 ++++++++++- synapse/rest/client/v1/login.py | 22 +++-- tests/handlers/test_oidc.py | 160 ++++++++++++++++++++----------- 9 files changed, 278 insertions(+), 67 deletions(-) create mode 100644 changelog.d/8413.feature diff --git a/changelog.d/8413.feature b/changelog.d/8413.feature new file mode 100644 index 0000000000..abe40a901c --- /dev/null +++ b/changelog.d/8413.feature @@ -0,0 +1 @@ +Support passing additional single sign-on parameters to the client. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 70cc06a6d8..066844b5a9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1748,6 +1748,14 @@ oidc_config: # #display_name_template: "{{ user.given_name }} {{ user.last_name }}" + # Jinja2 templates for extra attributes to send back to the client during + # login. + # + # Note that these are non-standard and clients will ignore them without modifications. + # + #extra_attributes: + #birthdate: "{{ user.birthdate }}" + # Enable CAS for registration and login. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index abea432343..32b06aa2c5 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -57,7 +57,7 @@ A custom mapping provider must specify the following methods: - This method must return a string, which is the unique identifier for the user. 
Commonly the ``sub`` claim of the response. * `map_user_attributes(self, userinfo, token)` - - This method should be async. + - This method must be async. - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user information from. @@ -66,6 +66,18 @@ A custom mapping provider must specify the following methods: - Returns a dictionary with two keys: - localpart: A required string, used to generate the Matrix ID. - displayname: An optional string, the display name for the user. +* `get_extra_attributes(self, userinfo, token)` + - This method must be async. + - Arguments: + - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user + information from. + - `token` - A dictionary which includes information necessary to make + further requests to the OpenID provider. + - Returns a dictionary that is suitable to be serialized to JSON. This + will be returned as part of the response during a successful login. + + Note that care should be taken to not overwrite any of the parameters + usually returned as part of the [login response](https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-login). ### Default OpenID Mapping Provider diff --git a/docs/workers.md b/docs/workers.md index df0ac84d94..ad4d8ca9f2 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -243,6 +243,22 @@ for the room are in flight: ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/messages$ +Additionally, the following endpoints should be included if Synapse is configured +to use SSO (you only need to include the ones for whichever SSO provider you're +using): + + # OpenID Connect requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$ + ^/_synapse/oidc/callback$ + + # SAML requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/sso/redirect$ + ^/_matrix/saml2/authn_response$ + + # CAS requests. + ^/_matrix/client/(api/v1|r0|unstable)/login/(cas|sso)/redirect$ + ^/_matrix/client/(api/v1|r0|unstable)/login/cas/ticket$ + Note that a HTTP listener with `client` and `federation` resources must be configured in the `worker_listeners` option in the worker config. diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index 70fc8a2f62..f924116819 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -204,6 +204,14 @@ class OIDCConfig(Config): # If unset, no displayname will be set. # #display_name_template: "{{{{ user.given_name }}}} {{{{ user.last_name }}}}" + + # Jinja2 templates for extra attributes to send back to the client during + # login. + # + # Note that these are non-standard and clients will ignore them without modifications. 
+ # + #extra_attributes: + #birthdate: "{{{{ user.birthdate }}}}" """.format( mapping_provider=DEFAULT_USER_MAPPING_PROVIDER ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0322b60cfc..00eae92052 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -137,6 +137,15 @@ def login_id_phone_to_thirdparty(identifier: JsonDict) -> Dict[str, str]: } +@attr.s(slots=True) +class SsoLoginExtraAttributes: + """Data we track about SAML2 sessions""" + + # time the session was created, in milliseconds + creation_time = attr.ib(type=int) + extra_attributes = attr.ib(type=JsonDict) + + class AuthHandler(BaseHandler): SESSION_EXPIRE_MS = 48 * 60 * 60 * 1000 @@ -239,6 +248,10 @@ class AuthHandler(BaseHandler): # cast to tuple for use with str.startswith self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist) + # A mapping of user ID to extra attributes to include in the login + # response. + self._extra_attributes = {} # type: Dict[str, SsoLoginExtraAttributes] + async def validate_user_via_ui_auth( self, requester: Requester, @@ -1165,6 +1178,7 @@ class AuthHandler(BaseHandler): registered_user_id: str, request: SynapseRequest, client_redirect_url: str, + extra_attributes: Optional[JsonDict] = None, ): """Having figured out a mxid for this user, complete the HTTP request @@ -1173,6 +1187,8 @@ class AuthHandler(BaseHandler): request: The request to complete. client_redirect_url: The URL to which to redirect the user at the end of the process. + extra_attributes: Extra attributes which will be passed to the client + during successful login. Must be JSON serializable. """ # If the account has been deactivated, do not proceed with the login # flow. @@ -1181,19 +1197,30 @@ class AuthHandler(BaseHandler): respond_with_html(request, 403, self._sso_account_deactivated_template) return - self._complete_sso_login(registered_user_id, request, client_redirect_url) + self._complete_sso_login( + registered_user_id, request, client_redirect_url, extra_attributes + ) def _complete_sso_login( self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str, + extra_attributes: Optional[JsonDict] = None, ): """ The synchronous portion of complete_sso_login. This exists purely for backwards compatibility of synapse.module_api.ModuleApi. """ + # Store any extra attributes which will be passed in the login response. + # Note that this is per-user so it may overwrite a previous value, this + # is considered OK since the newest SSO attributes should be most valid. + if extra_attributes: + self._extra_attributes[registered_user_id] = SsoLoginExtraAttributes( + self._clock.time_msec(), extra_attributes, + ) + # Create a login token login_token = self.macaroon_gen.generate_short_term_login_token( registered_user_id @@ -1226,6 +1253,37 @@ class AuthHandler(BaseHandler): ) respond_with_html(request, 200, html) + async def _sso_login_callback(self, login_result: JsonDict) -> None: + """ + A login callback which might add additional attributes to the login response. + + Args: + login_result: The data to be sent to the client. Includes the user + ID and access token. + """ + # Expire attributes before processing. Note that there shouldn't be any + # valid logins that still have extra attributes. 
+ self._expire_sso_extra_attributes() + + extra_attributes = self._extra_attributes.get(login_result["user_id"]) + if extra_attributes: + login_result.update(extra_attributes.extra_attributes) + + def _expire_sso_extra_attributes(self) -> None: + """ + Iterate through the mapping of user IDs to extra attributes and remove any that are no longer valid. + """ + # TODO This should match the amount of time the macaroon is valid for. + LOGIN_TOKEN_EXPIRATION_TIME = 2 * 60 * 1000 + expire_before = self._clock.time_msec() - LOGIN_TOKEN_EXPIRATION_TIME + to_expire = set() + for user_id, data in self._extra_attributes.items(): + if data.creation_time < expire_before: + to_expire.add(user_id) + for user_id in to_expire: + logger.debug("Expiring extra attributes for user %s", user_id) + del self._extra_attributes[user_id] + @staticmethod def add_query_param_to_url(url: str, param_name: str, param: Any): url_parts = list(urllib.parse.urlparse(url)) diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 0e06e4408d..19cd652675 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -37,7 +37,7 @@ from synapse.config import ConfigError from synapse.http.server import respond_with_html from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable -from synapse.types import UserID, map_username_to_mxid_localpart +from synapse.types import JsonDict, UserID, map_username_to_mxid_localpart from synapse.util import json_decoder if TYPE_CHECKING: @@ -707,6 +707,15 @@ class OidcHandler: self._render_error(request, "mapping_error", str(e)) return + # Mapping providers might not have get_extra_attributes: only call this + # method if it exists. + extra_attributes = None + get_extra_attributes = getattr( + self._user_mapping_provider, "get_extra_attributes", None + ) + if get_extra_attributes: + extra_attributes = await get_extra_attributes(userinfo, token) + # and finally complete the login if ui_auth_session_id: await self._auth_handler.complete_sso_ui_auth( @@ -714,7 +723,7 @@ class OidcHandler: ) else: await self._auth_handler.complete_sso_login( - user_id, request, client_redirect_url + user_id, request, client_redirect_url, extra_attributes ) def _generate_oidc_session_token( @@ -984,7 +993,7 @@ class OidcMappingProvider(Generic[C]): async def map_user_attributes( self, userinfo: UserInfo, token: Token ) -> UserAttribute: - """Map a ``UserInfo`` objects into user attributes. + """Map a `UserInfo` object into user attributes. Args: userinfo: An object representing the user given by the OIDC provider @@ -995,6 +1004,18 @@ class OidcMappingProvider(Generic[C]): """ raise NotImplementedError() + async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: + """Map a `UserInfo` object into additional attributes passed to the client during login. + + Args: + userinfo: An object representing the user given by the OIDC provider + token: A dict with the tokens returned by the provider + + Returns: + A dict containing additional attributes. Must be JSON serializable. 
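+
+            For example (illustrative), a provider exposing the user's phone
+            number might return {"phone": userinfo["phone"]}.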
+ """ + return {} + # Used to clear out "None" values in templates def jinja_finalize(thing): @@ -1009,6 +1030,7 @@ class JinjaOidcMappingConfig: subject_claim = attr.ib() # type: str localpart_template = attr.ib() # type: Template display_name_template = attr.ib() # type: Optional[Template] + extra_attributes = attr.ib() # type: Dict[str, Template] class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): @@ -1047,10 +1069,28 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): % (e,) ) + extra_attributes = {} # type Dict[str, Template] + if "extra_attributes" in config: + extra_attributes_config = config.get("extra_attributes") or {} + if not isinstance(extra_attributes_config, dict): + raise ConfigError( + "oidc_config.user_mapping_provider.config.extra_attributes must be a dict" + ) + + for key, value in extra_attributes_config.items(): + try: + extra_attributes[key] = env.from_string(value) + except Exception as e: + raise ConfigError( + "invalid jinja template for oidc_config.user_mapping_provider.config.extra_attributes.%s: %r" + % (key, e) + ) + return JinjaOidcMappingConfig( subject_claim=subject_claim, localpart_template=localpart_template, display_name_template=display_name_template, + extra_attributes=extra_attributes, ) def get_remote_user_id(self, userinfo: UserInfo) -> str: @@ -1071,3 +1111,13 @@ class JinjaOidcMappingProvider(OidcMappingProvider[JinjaOidcMappingConfig]): display_name = None return UserAttribute(localpart=localpart, display_name=display_name) + + async def get_extra_attributes(self, userinfo: UserInfo, token: Token) -> JsonDict: + extras = {} # type: Dict[str, str] + for key, template in self._config.extra_attributes.items(): + try: + extras[key] = template.render(user=userinfo).strip() + except Exception as e: + # Log an error and skip this value (don't break login for this). + logger.error("Failed to render OIDC extra attribute %s: %s" % (key, e)) + return extras diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 250b03a025..b9347b87c7 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -284,9 +284,7 @@ class LoginRestServlet(RestServlet): self, user_id: str, login_submission: JsonDict, - callback: Optional[ - Callable[[Dict[str, str]], Awaitable[Dict[str, str]]] - ] = None, + callback: Optional[Callable[[Dict[str, str]], Awaitable[None]]] = None, create_non_existent_users: bool = False, ) -> Dict[str, str]: """Called when we've successfully authed the user and now need to @@ -299,12 +297,12 @@ class LoginRestServlet(RestServlet): Args: user_id: ID of the user to register. login_submission: Dictionary of login information. - callback: Callback function to run after registration. + callback: Callback function to run after login. create_non_existent_users: Whether to create the user if they don't exist. Defaults to False. Returns: - result: Dictionary of account information after successful registration. + result: Dictionary of account information after successful login. """ # Before we actually log them in we check if they've already logged in @@ -339,14 +337,24 @@ class LoginRestServlet(RestServlet): return result async def _do_token_login(self, login_submission: JsonDict) -> Dict[str, str]: + """ + Handle the final stage of SSO login. + + Args: + login_submission: The JSON request body. + + Returns: + The body of the JSON response. 
+ """ token = login_submission["token"] auth_handler = self.auth_handler user_id = await auth_handler.validate_short_term_login_token_and_get_user_id( token ) - result = await self._complete_login(user_id, login_submission) - return result + return await self._complete_login( + user_id, login_submission, self.auth_handler._sso_login_callback + ) async def _do_jwt_login(self, login_submission: JsonDict) -> Dict[str, str]: token = login_submission.get("token", None) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 5910772aa8..d5087e58be 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -21,7 +21,6 @@ from mock import Mock, patch import attr import pymacaroons -from twisted.internet import defer from twisted.python.failure import Failure from twisted.web._newclient import ResponseDone @@ -87,6 +86,13 @@ class TestMappingProvider(OidcMappingProvider): async def map_user_attributes(self, userinfo, token): return {"localpart": userinfo["username"], "display_name": None} + # Do not include get_extra_attributes to test backwards compatibility paths. + + +class TestMappingProviderExtra(TestMappingProvider): + async def get_extra_attributes(self, userinfo, token): + return {"phone": userinfo["phone"]} + def simple_async_mock(return_value=None, raises=None): # AsyncMock is not available in python3.5, this mimics part of its behaviour @@ -126,7 +132,7 @@ class OidcHandlerTestCase(HomeserverTestCase): config = self.default_config() config["public_baseurl"] = BASE_URL - oidc_config = config.get("oidc_config", {}) + oidc_config = {} oidc_config["enabled"] = True oidc_config["client_id"] = CLIENT_ID oidc_config["client_secret"] = CLIENT_SECRET @@ -135,6 +141,10 @@ class OidcHandlerTestCase(HomeserverTestCase): oidc_config["user_mapping_provider"] = { "module": __name__ + ".TestMappingProvider", } + + # Update this config with what's in the default config so that + # override_config works as expected. 
+ oidc_config.update(config.get("oidc_config", {})) config["oidc_config"] = oidc_config hs = self.setup_test_homeserver( @@ -165,11 +175,10 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(self.handler._client_auth.client_secret, CLIENT_SECRET) @override_config({"oidc_config": {"discover": True}}) - @defer.inlineCallbacks def test_discovery(self): """The handler should discover the endpoints from OIDC discovery document.""" # This would throw if some metadata were invalid - metadata = yield defer.ensureDeferred(self.handler.load_metadata()) + metadata = self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_called_once_with(WELL_KNOWN) self.assertEqual(metadata.issuer, ISSUER) @@ -181,43 +190,40 @@ class OidcHandlerTestCase(HomeserverTestCase): # subsequent calls should be cached self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_metadata()) + self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": COMMON_CONFIG}) - @defer.inlineCallbacks def test_no_discovery(self): """When discovery is disabled, it should not try to load from discovery document.""" - yield defer.ensureDeferred(self.handler.load_metadata()) + self.get_success(self.handler.load_metadata()) self.http_client.get_json.assert_not_called() @override_config({"oidc_config": COMMON_CONFIG}) - @defer.inlineCallbacks def test_load_jwks(self): """JWKS loading is done once (then cached) if used.""" - jwks = yield defer.ensureDeferred(self.handler.load_jwks()) + jwks = self.get_success(self.handler.load_jwks()) self.http_client.get_json.assert_called_once_with(JWKS_URI) self.assertEqual(jwks, {"keys": []}) # subsequent calls should be cached
 self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_jwks()) + self.get_success(self.handler.load_jwks()) self.http_client.get_json.assert_not_called() # 
unless forced self.http_client.reset_mock() - yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + self.get_success(self.handler.load_jwks(force=True)) self.http_client.get_json.assert_called_once_with(JWKS_URI) # Throw if the JWKS uri is missing with self.metadata_edit({"jwks_uri": None}): - with self.assertRaises(RuntimeError): - yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + self.get_failure(self.handler.load_jwks(force=True), RuntimeError) # Return empty key set if JWKS are not used self.handler._scopes = [] # not asking the openid scope self.http_client.get_json.reset_mock() - jwks = yield defer.ensureDeferred(self.handler.load_jwks(force=True)) + jwks = self.get_success(self.handler.load_jwks(force=True)) self.http_client.get_json.assert_not_called() self.assertEqual(jwks, {"keys": []}) @@ -299,11 +305,10 @@ class OidcHandlerTestCase(HomeserverTestCase): # This should not throw self.handler._validate_metadata() - @defer.inlineCallbacks def test_redirect_request(self): """The redirect request has the right arguments & generates a valid session cookie.""" req = Mock(spec=["addCookie"]) - url = yield defer.ensureDeferred( + url = self.get_success( self.handler.handle_redirect_request(req, b"http://client/redirect") ) url = urlparse(url) @@ -343,20 +348,18 @@ class OidcHandlerTestCase(HomeserverTestCase): self.assertEqual(params["nonce"], [nonce]) self.assertEqual(redirect, "http://client/redirect") - @defer.inlineCallbacks def test_callback_error(self): """Errors from the provider returned in the callback are displayed.""" self.handler._render_error = Mock() request = Mock(args={}) request.args[b"error"] = [b"invalid_client"] - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "") request.args[b"error_description"] = [b"some description"] - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_client", "some description") - @defer.inlineCallbacks def test_callback(self): """Code callback works and display errors if something went wrong. 
@@ -377,7 +380,7 @@ class OidcHandlerTestCase(HomeserverTestCase): "sub": "foo", "preferred_username": "bar", } - user_id = UserID("foo", "domain.org") + user_id = "@foo:domain.org" self.handler._render_error = Mock(return_value=None) self.handler._exchange_code = simple_async_mock(return_value=token) self.handler._parse_id_token = simple_async_mock(return_value=userinfo) @@ -394,13 +397,12 @@ class OidcHandlerTestCase(HomeserverTestCase): client_redirect_url = "http://client/redirect" user_agent = "Browser" ip_address = "10.0.0.1" - session = self.handler._generate_oidc_session_token( + request.getCookie.return_value = self.handler._generate_oidc_session_token( state=state, nonce=nonce, client_redirect_url=client_redirect_url, ui_auth_session_id=None, ) - request.getCookie.return_value = session request.args = {} request.args[b"code"] = [code.encode("utf-8")] @@ -410,10 +412,10 @@ class OidcHandlerTestCase(HomeserverTestCase): request.requestHeaders.getRawHeaders.return_value = [user_agent.encode("ascii")] request.getClientIP.return_value = ip_address - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.handler._auth_handler.complete_sso_login.assert_called_once_with( - user_id, request, client_redirect_url, + user_id, request, client_redirect_url, {}, ) self.handler._exchange_code.assert_called_once_with(code) self.handler._parse_id_token.assert_called_once_with(token, nonce=nonce) @@ -427,13 +429,13 @@ class OidcHandlerTestCase(HomeserverTestCase): self.handler._map_userinfo_to_user = simple_async_mock( raises=MappingException() ) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mapping_error") self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) # Handle ID token errors self.handler._parse_id_token = simple_async_mock(raises=Exception()) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_token") self.handler._auth_handler.complete_sso_login.reset_mock() @@ -444,10 +446,10 @@ class OidcHandlerTestCase(HomeserverTestCase): # With userinfo fetching self.handler._scopes = [] # do not ask the "openid" scope - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.handler._auth_handler.complete_sso_login.assert_called_once_with( - user_id, request, client_redirect_url, + user_id, request, client_redirect_url, {}, ) self.handler._exchange_code.assert_called_once_with(code) self.handler._parse_id_token.assert_not_called() @@ -459,17 +461,16 @@ class OidcHandlerTestCase(HomeserverTestCase): # Handle userinfo fetching error self.handler._fetch_userinfo = simple_async_mock(raises=Exception()) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("fetch_error") # Handle code exchange failure self.handler._exchange_code = simple_async_mock( raises=OidcError("invalid_request") ) - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") - @defer.inlineCallbacks def test_callback_session(self): """The callback verifies the session presence and validity""" 
self.handler._render_error = Mock(return_value=None) @@ -478,20 +479,20 @@ class OidcHandlerTestCase(HomeserverTestCase): # Missing cookie request.args = {} request.getCookie.return_value = None - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("missing_session", "No session cookie found") # Missing session parameter request.args = {} request.getCookie.return_value = "session" - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request", "State parameter is missing") # Invalid cookie request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = "session" - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_session") # Mismatching session @@ -504,18 +505,17 @@ class OidcHandlerTestCase(HomeserverTestCase): request.args = {} request.args[b"state"] = [b"mismatching state"] request.getCookie.return_value = session - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("mismatching_session") # Valid session request.args = {} request.args[b"state"] = [b"state"] request.getCookie.return_value = session - yield defer.ensureDeferred(self.handler.handle_oidc_callback(request)) + self.get_success(self.handler.handle_oidc_callback(request)) self.assertRenderedError("invalid_request") @override_config({"oidc_config": {"client_auth_method": "client_secret_post"}}) - @defer.inlineCallbacks def test_exchange_code(self): """Code exchange behaves correctly and handles various error scenarios.""" token = {"type": "bearer"} @@ -524,7 +524,7 @@ class OidcHandlerTestCase(HomeserverTestCase): return_value=FakeResponse(code=200, phrase=b"OK", body=token_json) ) code = "code" - ret = yield defer.ensureDeferred(self.handler._exchange_code(code)) + ret = self.get_success(self.handler._exchange_code(code)) kwargs = self.http_client.request.call_args[1] self.assertEqual(ret, token) @@ -546,10 +546,9 @@ class OidcHandlerTestCase(HomeserverTestCase): body=b'{"error": "foo", "error_description": "bar"}', ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "foo") - self.assertEqual(exc.exception.error_description, "bar") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "foo") + self.assertEqual(exc.value.error_description, "bar") # Internal server error with no JSON body self.http_client.request = simple_async_mock( @@ -557,9 +556,8 @@ class OidcHandlerTestCase(HomeserverTestCase): code=500, phrase=b"Internal Server Error", body=b"Not JSON", ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "server_error") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "server_error") # Internal server error with JSON body self.http_client.request = simple_async_mock( @@ -569,17 +567,16 @@ class OidcHandlerTestCase(HomeserverTestCase): body=b'{"error": "internal_server_error"}', ) ) - with self.assertRaises(OidcError) as exc: - yield 
defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "internal_server_error") + + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "internal_server_error") # 4xx error without "error" field self.http_client.request = simple_async_mock( return_value=FakeResponse(code=400, phrase=b"Bad request", body=b"{}",) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "server_error") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "server_error") # 2xx error with "error" field self.http_client.request = simple_async_mock( @@ -587,9 +584,62 @@ class OidcHandlerTestCase(HomeserverTestCase): code=200, phrase=b"OK", body=b'{"error": "some_error"}', ) ) - with self.assertRaises(OidcError) as exc: - yield defer.ensureDeferred(self.handler._exchange_code(code)) - self.assertEqual(exc.exception.error, "some_error") + exc = self.get_failure(self.handler._exchange_code(code), OidcError) + self.assertEqual(exc.value.error, "some_error") + + @override_config( + { + "oidc_config": { + "user_mapping_provider": { + "module": __name__ + ".TestMappingProviderExtra" + } + } + } + ) + def test_extra_attributes(self): + """ + Login while using a mapping provider that implements get_extra_attributes. + """ + token = { + "type": "bearer", + "id_token": "id_token", + "access_token": "access_token", + } + userinfo = { + "sub": "foo", + "phone": "1234567", + } + user_id = "@foo:domain.org" + self.handler._exchange_code = simple_async_mock(return_value=token) + self.handler._parse_id_token = simple_async_mock(return_value=userinfo) + self.handler._map_userinfo_to_user = simple_async_mock(return_value=user_id) + self.handler._auth_handler.complete_sso_login = simple_async_mock() + request = Mock( + spec=["args", "getCookie", "addCookie", "requestHeaders", "getClientIP"] + ) + + state = "state" + client_redirect_url = "http://client/redirect" + request.getCookie.return_value = self.handler._generate_oidc_session_token( + state=state, + nonce="nonce", + client_redirect_url=client_redirect_url, + ui_auth_session_id=None, + ) + + request.args = {} + request.args[b"code"] = [b"code"] + request.args[b"state"] = [state.encode("utf-8")] + + request.requestHeaders = Mock(spec=["getRawHeaders"]) + request.requestHeaders.getRawHeaders.return_value = [b"Browser"] + request.getClientIP.return_value = "10.0.0.1" + + self.get_success(self.handler.handle_oidc_callback(request)) + + self.handler._auth_handler.complete_sso_login.assert_called_once_with( + user_id, request, client_redirect_url, {"phone": "1234567"}, + ) def test_map_userinfo_to_user(self): """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" From 7941372ec84786f85ae6d75fd2d7a4af5b72ac98 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Sep 2020 20:29:19 +0100 Subject: [PATCH 080/134] Make token serializing/deserializing async (#8427) The idea is that in future tokens will encode a mapping of instance to position. However, we don't want to include the full instance name in the string representation, so instead we'll have a mapping between instance name and an immutable integer ID in the DB that we can use instead. 
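Concretely, that means to_string and from_string grow a store argument and become coroutines, so every call site gains an await. A sketch of the new call pattern, with lines adapted from the sync servlet changes below (both run inside an async request handler):

    # Parsing a client-supplied token now needs the datastore:
    since_token = await StreamToken.from_string(self.store, since)

    # ...and so does serializing one back out:
    next_batch = await sync_result.next_batch.to_string(self.store)

One knock-on effect is that module-level constants can no longer be built via from_string, which is why StreamToken.START is now constructed directly from its component keys.
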
We'll then do the lookup when we serialize/deserialize the token (we could alternatively pass around an `Instance` type that includes both the name and ID, but that turns out to be a lot more invasive). --- changelog.d/8427.misc | 1 + synapse/handlers/events.py | 4 +- synapse/handlers/initial_sync.py | 14 +++--- synapse/handlers/pagination.py | 8 ++-- synapse/handlers/room.py | 8 ++-- synapse/handlers/search.py | 8 ++-- synapse/rest/admin/__init__.py | 2 +- synapse/rest/client/v1/events.py | 3 +- synapse/rest/client/v1/initial_sync.py | 3 +- synapse/rest/client/v1/room.py | 11 +++-- synapse/rest/client/v2_alpha/keys.py | 3 +- synapse/rest/client/v2_alpha/sync.py | 10 ++--- .../storage/databases/main/purge_events.py | 8 ++-- synapse/streams/config.py | 9 ++-- synapse/types.py | 43 +++++++++++++++---- tests/rest/client/v1/test_rooms.py | 30 +++++++++---- tests/storage/test_purge.py | 9 ++-- 17 files changed, 115 insertions(+), 59 deletions(-) create mode 100644 changelog.d/8427.misc diff --git a/changelog.d/8427.misc b/changelog.d/8427.misc new file mode 100644 index 0000000000..c9656b9112 --- /dev/null +++ b/changelog.d/8427.misc @@ -0,0 +1 @@ +Make stream token serializing/deserializing async. diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 0875b74ea8..539b4fc32e 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -133,8 +133,8 @@ class EventStreamHandler(BaseHandler): chunk = { "chunk": chunks, - "start": tokens[0].to_string(), - "end": tokens[1].to_string(), + "start": await tokens[0].to_string(self.store), + "end": await tokens[1].to_string(self.store), } return chunk diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 43f15435de..39a85801c1 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -203,8 +203,8 @@ class InitialSyncHandler(BaseHandler): messages, time_now=time_now, as_client_event=as_client_event ) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), } d["state"] = await self._event_serializer.serialize_events( @@ -249,7 +249,7 @@ class InitialSyncHandler(BaseHandler): ], "account_data": account_data_events, "receipts": receipt, - "end": now_token.to_string(), + "end": await now_token.to_string(self.store), } return ret @@ -348,8 +348,8 @@ class InitialSyncHandler(BaseHandler): "chunk": ( await self._event_serializer.serialize_events(messages, time_now) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), }, "state": ( await self._event_serializer.serialize_events( @@ -447,8 +447,8 @@ class InitialSyncHandler(BaseHandler): "chunk": ( await self._event_serializer.serialize_events(messages, time_now) ), - "start": start_token.to_string(), - "end": end_token.to_string(), + "start": await start_token.to_string(self.store), + "end": await end_token.to_string(self.store), }, "state": state, "presence": presence, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index d6779a4b44..2c2a633938 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -413,8 +413,8 @@ class PaginationHandler: if not events: return { "chunk": [], - "start": from_token.to_string(), - "end": next_token.to_string(), + "start": await from_token.to_string(self.store), + "end": await 
next_token.to_string(self.store), } state = None @@ -442,8 +442,8 @@ class PaginationHandler: events, time_now, as_client_event=as_client_event ) ), - "start": from_token.to_string(), - "end": next_token.to_string(), + "start": await from_token.to_string(self.store), + "end": await next_token.to_string(self.store), } if state: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 836b3f381a..d5f7c78edf 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1077,11 +1077,13 @@ class RoomContextHandler: # the token, which we replace. token = StreamToken.START - results["start"] = token.copy_and_replace( + results["start"] = await token.copy_and_replace( "room_key", results["start"] - ).to_string() + ).to_string(self.store) - results["end"] = token.copy_and_replace("room_key", results["end"]).to_string() + results["end"] = await token.copy_and_replace( + "room_key", results["end"] + ).to_string(self.store) return results diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 6a76c20d79..e9402e6e2e 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -362,13 +362,13 @@ class SearchHandler(BaseHandler): self.storage, user.to_string(), res["events_after"] ) - res["start"] = now_token.copy_and_replace( + res["start"] = await now_token.copy_and_replace( "room_key", res["start"] - ).to_string() + ).to_string(self.store) - res["end"] = now_token.copy_and_replace( + res["end"] = await now_token.copy_and_replace( "room_key", res["end"] - ).to_string() + ).to_string(self.store) if include_profile: senders = { diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index ba53f66f02..57cac22252 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -110,7 +110,7 @@ class PurgeHistoryRestServlet(RestServlet): raise SynapseError(400, "Event is for wrong room.") room_token = await self.store.get_topological_token_for_event(event_id) - token = str(room_token) + token = await room_token.to_string(self.store) logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) elif "purge_up_to_ts" in body: diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 985d994f6b..1ecb77aa26 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -33,6 +33,7 @@ class EventStreamRestServlet(RestServlet): super().__init__() self.event_stream_handler = hs.get_event_stream_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -44,7 +45,7 @@ class EventStreamRestServlet(RestServlet): if b"room_id" in request.args: room_id = request.args[b"room_id"][0].decode("ascii") - pagin_config = PaginationConfig.from_request(request) + pagin_config = await PaginationConfig.from_request(self.store, request) timeout = EventStreamRestServlet.DEFAULT_LONGPOLL_TIME_MS if b"timeout" in request.args: try: diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index d7042786ce..91da0ee573 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -27,11 +27,12 @@ class InitialSyncRestServlet(RestServlet): super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request) as_client_event = b"raw" 
not in request.args - pagination_config = PaginationConfig.from_request(request) + pagination_config = await PaginationConfig.from_request(self.store, request) include_archived = parse_boolean(request, "archived", default=False) content = await self.initial_sync_handler.snapshot_all_rooms( user_id=requester.user.to_string(), diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7e64a2e0fe..b63389e5fe 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -451,6 +451,7 @@ class RoomMemberListRestServlet(RestServlet): super().__init__() self.message_handler = hs.get_message_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): # TODO support Pagination stream API (limit/tokens) @@ -465,7 +466,7 @@ class RoomMemberListRestServlet(RestServlet): if at_token_string is None: at_token = None else: - at_token = StreamToken.from_string(at_token_string) + at_token = await StreamToken.from_string(self.store, at_token_string) # let you filter down on particular memberships. # XXX: this may not be the best shape for this API - we could pass in a filter @@ -521,10 +522,13 @@ class RoomMessageListRestServlet(RestServlet): super().__init__() self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request, allow_guest=True) - pagination_config = PaginationConfig.from_request(request, default_limit=10) + pagination_config = await PaginationConfig.from_request( + self.store, request, default_limit=10 + ) as_client_event = b"raw" not in request.args filter_str = parse_string(request, b"filter", encoding="utf-8") if filter_str: @@ -580,10 +584,11 @@ class RoomInitialSyncRestServlet(RestServlet): super().__init__() self.initial_sync_handler = hs.get_initial_sync_handler() self.auth = hs.get_auth() + self.store = hs.get_datastore() async def on_GET(self, request, room_id): requester = await self.auth.get_user_by_req(request, allow_guest=True) - pagination_config = PaginationConfig.from_request(request) + pagination_config = await PaginationConfig.from_request(self.store, request) content = await self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 7abd6ff333..55c4606569 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -180,6 +180,7 @@ class KeyChangesServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.device_handler = hs.get_device_handler() + self.store = hs.get_datastore() async def on_GET(self, request): requester = await self.auth.get_user_by_req(request, allow_guest=True) @@ -191,7 +192,7 @@ class KeyChangesServlet(RestServlet): # changes after the "to" as well as before. 
set_tag("to", parse_string(request, "to")) - from_token = StreamToken.from_string(from_token_string) + from_token = await StreamToken.from_string(self.store, from_token_string) user_id = requester.user.to_string() diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 51e395cc64..6779df952f 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -77,6 +77,7 @@ class SyncRestServlet(RestServlet): super().__init__() self.hs = hs self.auth = hs.get_auth() + self.store = hs.get_datastore() self.sync_handler = hs.get_sync_handler() self.clock = hs.get_clock() self.filtering = hs.get_filtering() @@ -151,10 +152,9 @@ class SyncRestServlet(RestServlet): device_id=device_id, ) + since_token = None if since is not None: - since_token = StreamToken.from_string(since) - else: - since_token = None + since_token = await StreamToken.from_string(self.store, since) # send any outstanding server notices to the user. await self._server_notices_sender.on_user_syncing(user.to_string()) @@ -236,7 +236,7 @@ class SyncRestServlet(RestServlet): "leave": sync_result.groups.leave, }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, - "next_batch": sync_result.next_batch.to_string(), + "next_batch": await sync_result.next_batch.to_string(self.store), } @staticmethod @@ -413,7 +413,7 @@ class SyncRestServlet(RestServlet): result = { "timeline": { "events": serialized_timeline, - "prev_batch": room.timeline.prev_batch.to_string(), + "prev_batch": await room.timeline.prev_batch.to_string(self.store), "limited": room.timeline.limited, }, "state": {"events": serialized_state}, diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index d7a03cbf7d..ecfc6717b3 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -42,17 +42,17 @@ class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore): The set of state groups that are referenced by deleted events. """ + parsed_token = await RoomStreamToken.parse(self, token) + return await self.db_pool.runInteraction( "purge_history", self._purge_history_txn, room_id, - token, + parsed_token, delete_local_events, ) - def _purge_history_txn(self, txn, room_id, token_str, delete_local_events): - token = RoomStreamToken.parse(token_str) - + def _purge_history_txn(self, txn, room_id, token, delete_local_events): # Tables that should be pruned: # event_auth # event_backward_extremities diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 0bdf846edf..fdda21d165 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging from typing import Optional @@ -21,6 +20,7 @@ import attr from synapse.api.errors import SynapseError from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest +from synapse.storage.databases.main import DataStore from synapse.types import StreamToken logger = logging.getLogger(__name__) @@ -39,8 +39,9 @@ class PaginationConfig: limit = attr.ib(type=Optional[int]) @classmethod - def from_request( + async def from_request( cls, + store: "DataStore", request: SynapseRequest, raise_invalid_params: bool = True, default_limit: Optional[int] = None, @@ -54,13 +55,13 @@ class PaginationConfig: if from_tok == "END": from_tok = None # For backwards compat. elif from_tok: - from_tok = StreamToken.from_string(from_tok) + from_tok = await StreamToken.from_string(store, from_tok) except Exception: raise SynapseError(400, "'from' parameter is invalid") try: if to_tok: - to_tok = StreamToken.from_string(to_tok) + to_tok = await StreamToken.from_string(store, to_tok) except Exception: raise SynapseError(400, "'to' parameter is invalid") diff --git a/synapse/types.py b/synapse/types.py index 02bcc197ec..bd271f9f16 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -18,7 +18,17 @@ import re import string import sys from collections import namedtuple -from typing import Any, Dict, Mapping, MutableMapping, Optional, Tuple, Type, TypeVar +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Mapping, + MutableMapping, + Optional, + Tuple, + Type, + TypeVar, +) import attr from signedjson.key import decode_verify_key_bytes @@ -26,6 +36,9 @@ from unpaddedbase64 import decode_base64 from synapse.api.errors import Codes, SynapseError +if TYPE_CHECKING: + from synapse.storage.databases.main import DataStore + # define a version of typing.Collection that works on python 3.5 if sys.version_info[:3] >= (3, 6, 0): from typing import Collection @@ -393,7 +406,7 @@ class RoomStreamToken: stream = attr.ib(type=int, validator=attr.validators.instance_of(int)) @classmethod - def parse(cls, string: str) -> "RoomStreamToken": + async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": try: if string[0] == "s": return cls(topological=None, stream=int(string[1:])) @@ -428,7 +441,7 @@ class RoomStreamToken: def as_tuple(self) -> Tuple[Optional[int], int]: return (self.topological, self.stream) - def __str__(self) -> str: + async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) else: @@ -453,18 +466,32 @@ class StreamToken: START = None # type: StreamToken @classmethod - def from_string(cls, string): + async def from_string(cls, store: "DataStore", string: str) -> "StreamToken": try: keys = string.split(cls._SEPARATOR) while len(keys) < len(attr.fields(cls)): # i.e. 
old token from before receipt_key keys.append("0") - return cls(RoomStreamToken.parse(keys[0]), *(int(k) for k in keys[1:])) + return cls( + await RoomStreamToken.parse(store, keys[0]), *(int(k) for k in keys[1:]) + ) except Exception: raise SynapseError(400, "Invalid Token") - def to_string(self): - return self._SEPARATOR.join([str(k) for k in attr.astuple(self, recurse=False)]) + async def to_string(self, store: "DataStore") -> str: + return self._SEPARATOR.join( + [ + await self.room_key.to_string(store), + str(self.presence_key), + str(self.typing_key), + str(self.receipt_key), + str(self.account_data_key), + str(self.push_rules_key), + str(self.to_device_key), + str(self.device_list_key), + str(self.groups_key), + ] + ) @property def room_stream_id(self): @@ -493,7 +520,7 @@ class StreamToken: return attr.evolve(self, **{key: new_value}) -StreamToken.START = StreamToken.from_string("s0_0") +StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0) @attr.s(slots=True, frozen=True) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index a3287011e9..0d809d25d5 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -902,16 +902,18 @@ class RoomMessageListTestCase(RoomBase): # Send a first message in the room, which will be removed by the purge. first_event_id = self.helper.send(self.room_id, "message 1")["event_id"] - first_token = str( - self.get_success(store.get_topological_token_for_event(first_event_id)) + first_token = self.get_success( + store.get_topological_token_for_event(first_event_id) ) + first_token_str = self.get_success(first_token.to_string(store)) # Send a second message in the room, which won't be removed, and which we'll # use as the marker to purge events before. second_event_id = self.helper.send(self.room_id, "message 2")["event_id"] - second_token = str( - self.get_success(store.get_topological_token_for_event(second_event_id)) + second_token = self.get_success( + store.get_topological_token_for_event(second_event_id) ) + second_token_str = self.get_success(second_token.to_string(store)) # Send a third event in the room to ensure we don't fall under any edge case # due to our marker being the latest forward extremity in the room. 
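Note that the on-the-wire token format is unchanged by this patch: a topological token still serializes as t<topological>-<stream> and a stream-only token as s<stream>; only the conversion API is now async and store-aware. A short sketch of the round trip, assuming a store as threaded through above:

    async def roundtrip(store) -> None:
        token = RoomStreamToken(topological=42, stream=1000)
        assert await token.to_string(store) == "t42-1000"
        assert await RoomStreamToken.parse(store, "s1000") == RoomStreamToken(None, 1000)
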
@@ -921,7 +923,11 @@ class RoomMessageListTestCase(RoomBase): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) @@ -936,7 +942,7 @@ class RoomMessageListTestCase(RoomBase): pagination_handler._purge_history( purge_id=purge_id, room_id=self.room_id, - token=second_token, + token=second_token_str, delete_local_events=True, ) ) @@ -946,7 +952,11 @@ class RoomMessageListTestCase(RoomBase): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + second_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) @@ -960,7 +970,11 @@ class RoomMessageListTestCase(RoomBase): request, channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s" - % (self.room_id, first_token, json.dumps({"types": [EventTypes.Message]})), + % ( + self.room_id, + first_token_str, + json.dumps({"types": [EventTypes.Message]}), + ), ) self.render(request) self.assertEqual(channel.code, 200, channel.json_body) diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 723cd28933..cc1f3c53c5 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -47,12 +47,15 @@ class PurgeTests(HomeserverTestCase): storage = self.hs.get_storage() # Get the topological token - event = str( - self.get_success(store.get_topological_token_for_event(last["event_id"])) + token = self.get_success( + store.get_topological_token_for_event(last["event_id"]) ) + token_str = self.get_success(token.to_string(self.hs.get_datastore())) # Purge everything before this topological token - self.get_success(storage.purge_events.purge_history(self.room_id, event, True)) + self.get_success( + storage.purge_events.purge_history(self.room_id, token_str, True) + ) # 1-3 should fail and last will succeed, meaning that 1-3 are deleted # and last is not. From c1ef579b63a39d8d6fb31ddc1d3cf173eaf7e5b7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 1 Oct 2020 11:09:12 +0100 Subject: [PATCH 081/134] Add prometheus metrics to track federation delays (#8430) Add a pair of federation metrics to track the delays in sending PDUs to/from particular servers. --- changelog.d/8430.feature | 1 + docs/sample_config.yaml | 12 +++++++++ synapse/config/_util.py | 6 +++-- synapse/config/federation.py | 27 ++++++++++++++++++- synapse/config/homeserver.py | 1 - synapse/config/tls.py | 1 - synapse/federation/federation_server.py | 24 ++++++++++++++++- .../federation/sender/transaction_manager.py | 22 +++++++++++++++ 8 files changed, 88 insertions(+), 6 deletions(-) create mode 100644 changelog.d/8430.feature diff --git a/changelog.d/8430.feature b/changelog.d/8430.feature new file mode 100644 index 0000000000..1f31d42bc1 --- /dev/null +++ b/changelog.d/8430.feature @@ -0,0 +1 @@ +Add prometheus metrics to track federation delays. 
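Both metrics have the same shape: a prometheus Gauge labelled by the remote server_name, updated only for domains the admin has opted in to via the new federation_metrics_domains option, and reporting ages in seconds. A condensed sketch of the recording step, mirroring the federation_server.py change below (origin, newest_pdu_ts and the clock are as in that handler):

    from prometheus_client import Gauge

    last_pdu_age_metric = Gauge(
        "synapse_federation_last_received_pdu_age",
        "The age (in seconds) of the last PDU successfully received from the given domain",
        labelnames=("server_name",),
    )

    # Timestamps are in milliseconds; the gauge is reported in seconds.
    if newest_pdu_ts and origin in federation_metrics_domains:
        age_ms = clock.time_msec() - newest_pdu_ts
        last_pdu_age_metric.labels(server_name=origin).set(age_ms / 1000)
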
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 066844b5a9..8a3206e845 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -629,6 +629,7 @@ acme: #tls_fingerprints: [{"sha256": ""}] +## Federation ## # Restrict federation to the following whitelist of domains. # N.B. we recommend also firewalling your federation listener to limit @@ -662,6 +663,17 @@ federation_ip_range_blacklist: - 'fe80::/64' - 'fc00::/7' +# Report prometheus metrics on the age of PDUs being sent to and received from +# the following domains. This can be used to give an idea of "delay" on inbound +# and outbound federation, though be aware that any delay can be due to problems +# at either end or with the intermediate network. +# +# By default, no domains are monitored in this way. +# +#federation_metrics_domains: +# - matrix.org +# - example.com + ## Caching ## diff --git a/synapse/config/_util.py b/synapse/config/_util.py index cd31b1c3c9..c74969a977 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, List +from typing import Any, Iterable import jsonschema @@ -20,7 +20,9 @@ from synapse.config._base import ConfigError from synapse.types import JsonDict -def validate_config(json_schema: JsonDict, config: Any, config_path: List[str]) -> None: +def validate_config( + json_schema: JsonDict, config: Any, config_path: Iterable[str] +) -> None: """Validates a config setting against a JsonSchema definition This can be used to validate a section of the config file against a schema diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 2c77d8f85b..ffd8fca54e 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -17,7 +17,8 @@ from typing import Optional from netaddr import IPSet -from ._base import Config, ConfigError +from synapse.config._base import Config, ConfigError +from synapse.config._util import validate_config class FederationConfig(Config): @@ -52,8 +53,18 @@ class FederationConfig(Config): "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e ) + federation_metrics_domains = config.get("federation_metrics_domains") or [] + validate_config( + _METRICS_FOR_DOMAINS_SCHEMA, + federation_metrics_domains, + ("federation_metrics_domains",), + ) + self.federation_metrics_domains = set(federation_metrics_domains) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ + ## Federation ## + # Restrict federation to the following whitelist of domains. # N.B. we recommend also firewalling your federation listener to limit # inbound federation traffic as early as possible, rather than relying @@ -85,4 +96,18 @@ class FederationConfig(Config): - '::1/128' - 'fe80::/64' - 'fc00::/7' + + # Report prometheus metrics on the age of PDUs being sent to and received from + # the following domains. This can be used to give an idea of "delay" on inbound + # and outbound federation, though be aware that any delay can be due to problems + # at either end or with the intermediate network. + # + # By default, no domains are monitored in this way. 
+ # + #federation_metrics_domains: + # - matrix.org + # - example.com """ + + +_METRICS_FOR_DOMAINS_SCHEMA = {"type": "array", "items": {"type": "string"}} diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 556e291495..be65554524 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -92,5 +92,4 @@ class HomeServerConfig(RootConfig): TracerConfig, WorkerConfig, RedisConfig, - FederationConfig, ] diff --git a/synapse/config/tls.py b/synapse/config/tls.py index e368ea564d..9ddb8b546b 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -471,7 +471,6 @@ class TlsConfig(Config): # or by checking matrix.org/federationtester/api/report?server_name=$host # #tls_fingerprints: [{"sha256": ""}] - """ # Lowercase the string representation of boolean values % { diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 2dcd081cbc..24329dd0e3 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -28,7 +28,7 @@ from typing import ( Union, ) -from prometheus_client import Counter, Histogram +from prometheus_client import Counter, Gauge, Histogram from twisted.internet import defer from twisted.internet.abstract import isIPAddress @@ -88,6 +88,13 @@ pdu_process_time = Histogram( ) +last_pdu_age_metric = Gauge( + "synapse_federation_last_received_pdu_age", + "The age (in seconds) of the last PDU successfully received from the given domain", + labelnames=("server_name",), +) + + class FederationServer(FederationBase): def __init__(self, hs): super().__init__(hs) @@ -118,6 +125,10 @@ class FederationServer(FederationBase): hs, "state_ids_resp", timeout_ms=30000 ) + self._federation_metrics_domains = ( + hs.get_config().federation.federation_metrics_domains + ) + async def on_backfill_request( self, origin: str, room_id: str, versions: List[str], limit: int ) -> Tuple[int, Dict[str, Any]]: @@ -262,7 +273,11 @@ class FederationServer(FederationBase): pdus_by_room = {} # type: Dict[str, List[EventBase]] + newest_pdu_ts = 0 + for p in transaction.pdus: # type: ignore + # FIXME (richardv): I don't think this works: + # https://github.com/matrix-org/synapse/issues/8429 if "unsigned" in p: unsigned = p["unsigned"] if "age" in unsigned: @@ -300,6 +315,9 @@ class FederationServer(FederationBase): event = event_from_pdu_json(p, room_version) pdus_by_room.setdefault(room_id, []).append(event) + if event.origin_server_ts > newest_pdu_ts: + newest_pdu_ts = event.origin_server_ts + pdu_results = {} # we can process different rooms in parallel (which is useful if they @@ -340,6 +358,10 @@ class FederationServer(FederationBase): process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT ) + if newest_pdu_ts and origin in self._federation_metrics_domains: + newest_pdu_age = self._clock.time_msec() - newest_pdu_ts + last_pdu_age_metric.labels(server_name=origin).set(newest_pdu_age / 1000) + return pdu_results async def _handle_edus_in_txn(self, origin: str, transaction: Transaction): diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index c84072ab73..3e07f925e0 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -15,6 +15,8 @@ import logging from typing import TYPE_CHECKING, List +from prometheus_client import Gauge + from synapse.api.errors import HttpResponseException from synapse.events import EventBase from 
synapse.federation.persistence import TransactionActions @@ -34,6 +36,12 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +last_pdu_age_metric = Gauge( + "synapse_federation_last_sent_pdu_age", + "The age (in seconds) of the last PDU successfully sent to the given domain", + labelnames=("server_name",), +) + class TransactionManager: """Helper class which handles building and sending transactions @@ -48,6 +56,10 @@ class TransactionManager: self._transaction_actions = TransactionActions(self._store) self._transport_layer = hs.get_federation_transport_client() + self._federation_metrics_domains = ( + hs.get_config().federation.federation_metrics_domains + ) + # HACK to get unique tx id self._next_txn_id = int(self.clock.time_msec()) @@ -119,6 +131,9 @@ class TransactionManager: # FIXME (erikj): This is a bit of a hack to make the Pdu age # keys work + # FIXME (richardv): I also believe it no longer works. We (now?) store + # "age_ts" in "unsigned" rather than at the top level. See + # https://github.com/matrix-org/synapse/issues/8429. def json_data_cb(): data = transaction.get_dict() now = int(self.clock.time_msec()) @@ -167,5 +182,12 @@ class TransactionManager: ) success = False + if success and pdus and destination in self._federation_metrics_domains: + last_pdu = pdus[-1] + last_pdu_age = self.clock.time_msec() - last_pdu.origin_server_ts + last_pdu_age_metric.labels(server_name=destination).set( + last_pdu_age / 1000 + ) + set_tag(tags.ERROR, not success) return success From 4ff0201e6235b8b2efc5ce5a7dc3c479ea96df53 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Oct 2020 08:09:18 -0400 Subject: [PATCH 082/134] Enable mypy checking for unreachable code and fix instances. (#8432) --- changelog.d/8432.misc | 1 + mypy.ini | 1 + synapse/config/tls.py | 18 +++++++++--------- synapse/federation/federation_server.py | 5 ++--- synapse/handlers/directory.py | 2 +- synapse/handlers/room.py | 2 -- synapse/handlers/room_member.py | 2 +- synapse/handlers/sync.py | 2 +- synapse/http/server.py | 4 ++-- synapse/logging/_structured.py | 10 +--------- synapse/push/push_rule_evaluator.py | 4 ++-- synapse/replication/tcp/protocol.py | 10 ++++++---- synapse/state/__init__.py | 2 +- .../storage/databases/main/censor_events.py | 6 +++--- synapse/storage/databases/main/events.py | 18 +++++------------- synapse/storage/databases/main/stream.py | 2 +- synapse/storage/util/id_generators.py | 2 +- 17 files changed, 38 insertions(+), 53 deletions(-) create mode 100644 changelog.d/8432.misc diff --git a/changelog.d/8432.misc b/changelog.d/8432.misc new file mode 100644 index 0000000000..01fdad4caf --- /dev/null +++ b/changelog.d/8432.misc @@ -0,0 +1 @@ +Check for unreachable code with mypy. 
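Most of the diff below is fallout from a single new line in mypy.ini, warn_unreachable = True, which makes mypy error on any statement its type narrowing proves can never execute. A small illustrative example of the class of dead code it surfaces (not taken from the codebase):

    from typing import Optional

    def describe(value: Optional[str]) -> str:
        if value is None:
            return "<none>"
        # After the early return, value is narrowed to str, so this branch
        # can never run:
        if value is None:
            return "impossible"  # error: Statement is unreachable  [unreachable]
        return value
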
diff --git a/mypy.ini b/mypy.ini index 7986781432..c283f15b21 100644 --- a/mypy.ini +++ b/mypy.ini @@ -6,6 +6,7 @@ check_untyped_defs = True show_error_codes = True show_traceback = True mypy_path = stubs +warn_unreachable = True files = synapse/api, synapse/appservice, diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 9ddb8b546b..ad37b93c02 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -18,7 +18,7 @@ import os import warnings from datetime import datetime from hashlib import sha256 -from typing import List +from typing import List, Optional from unpaddedbase64 import encode_base64 @@ -177,8 +177,8 @@ class TlsConfig(Config): "use_insecure_ssl_client_just_for_testing_do_not_use" ) - self.tls_certificate = None - self.tls_private_key = None + self.tls_certificate = None # type: Optional[crypto.X509] + self.tls_private_key = None # type: Optional[crypto.PKey] def is_disk_cert_valid(self, allow_self_signed=True): """ @@ -226,12 +226,12 @@ class TlsConfig(Config): days_remaining = (expires_on - now).days return days_remaining - def read_certificate_from_disk(self, require_cert_and_key): + def read_certificate_from_disk(self, require_cert_and_key: bool): """ Read the certificates and private key from disk. Args: - require_cert_and_key (bool): set to True to throw an error if the certificate + require_cert_and_key: set to True to throw an error if the certificate and key file are not given """ if require_cert_and_key: @@ -479,13 +479,13 @@ class TlsConfig(Config): } ) - def read_tls_certificate(self): + def read_tls_certificate(self) -> crypto.X509: """Reads the TLS certificate from the configured file, and returns it Also checks if it is self-signed, and warns if so Returns: - OpenSSL.crypto.X509: the certificate + The certificate """ cert_path = self.tls_certificate_file logger.info("Loading TLS certificate from %s", cert_path) @@ -504,11 +504,11 @@ class TlsConfig(Config): return cert - def read_tls_private_key(self): + def read_tls_private_key(self) -> crypto.PKey: """Reads the TLS private key from the configured file, and returns it Returns: - OpenSSL.crypto.PKey: the private key + The private key """ private_key_path = self.tls_private_key_file logger.info("Loading TLS key from %s", private_key_path) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 24329dd0e3..02f11e1209 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -22,7 +22,6 @@ from typing import ( Callable, Dict, List, - Match, Optional, Tuple, Union, @@ -825,14 +824,14 @@ def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool: return False -def _acl_entry_matches(server_name: str, acl_entry: str) -> Match: +def _acl_entry_matches(server_name: str, acl_entry: Any) -> bool: if not isinstance(acl_entry, str): logger.warning( "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry) ) return False regex = glob_to_regex(acl_entry) - return regex.match(server_name) + return bool(regex.match(server_name)) class FederationHandlerRegistry: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 62aa9a2da8..6f15c68240 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -383,7 +383,7 @@ class DirectoryHandler(BaseHandler): """ creator = await self.store.get_room_alias_creator(alias.to_string()) - if creator is not None and creator == user_id: + if creator == user_id: return True # Resolve the alias to the 
corresponding room. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d5f7c78edf..f1a6699cd4 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -962,8 +962,6 @@ class RoomCreationHandler(BaseHandler): try: random_string = stringutils.random_string(18) gen_room_id = RoomID(random_string, self.hs.hostname).to_string() - if isinstance(gen_room_id, bytes): - gen_room_id = gen_room_id.decode("utf-8") await self.store.store_room( room_id=gen_room_id, room_creator_user_id=creator_id, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 8feba8c90a..5ec36f591d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -642,7 +642,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): async def send_membership_event( self, - requester: Requester, + requester: Optional[Requester], event: EventBase, context: EventContext, ratelimit: bool = True, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index bfe2583002..260ec19b41 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -87,7 +87,7 @@ class SyncConfig: class TimelineBatch: prev_batch = attr.ib(type=StreamToken) events = attr.ib(type=List[EventBase]) - limited = attr.ib(bool) + limited = attr.ib(type=bool) def __bool__(self) -> bool: """Make the result appear empty if there are no updates. This is used diff --git a/synapse/http/server.py b/synapse/http/server.py index 996a31a9ec..09ed74f6ce 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -257,7 +257,7 @@ class _AsyncResource(resource.Resource, metaclass=abc.ABCMeta): if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)): callback_return = await raw_callback_return else: - callback_return = raw_callback_return + callback_return = raw_callback_return # type: ignore return callback_return @@ -406,7 +406,7 @@ class JsonResource(DirectServeJsonResource): if isinstance(raw_callback_return, (defer.Deferred, types.CoroutineType)): callback_return = await raw_callback_return else: - callback_return = raw_callback_return + callback_return = raw_callback_return # type: ignore return callback_return diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 144506c8f2..0fc2ea609e 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import os.path import sys @@ -89,14 +88,7 @@ class LogContextObserver: context = current_context() # Copy the context information to the log event. - if context is not None: - context.copy_to_twisted_log_entry(event) - else: - # If there's no logging context, not even the root one, we might be - # starting up or it might be from non-Synapse code. Log it as if it - # came from the root logger. 
- event["request"] = None - event["scope"] = None + context.copy_to_twisted_log_entry(event) self.observer(event) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 709ace01e5..3a68ce636f 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -16,7 +16,7 @@ import logging import re -from typing import Any, Dict, List, Pattern, Union +from typing import Any, Dict, List, Optional, Pattern, Union from synapse.events import EventBase from synapse.types import UserID @@ -181,7 +181,7 @@ class PushRuleEvaluatorForEvent: return r.search(body) - def _get_value(self, dotted_key: str) -> str: + def _get_value(self, dotted_key: str) -> Optional[str]: return self._value_cache.get(dotted_key, None) diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 0b0d204e64..a509e599c2 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -51,10 +51,11 @@ import fcntl import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, List +from typing import TYPE_CHECKING, List, Optional from prometheus_client import Counter +from twisted.internet import task from twisted.protocols.basic import LineOnlyReceiver from twisted.python.failure import Failure @@ -152,9 +153,10 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.last_received_command = self.clock.time_msec() self.last_sent_command = 0 - self.time_we_closed = None # When we requested the connection be closed + # When we requested the connection be closed + self.time_we_closed = None # type: Optional[int] - self.received_ping = False # Have we reecived a ping from the other side + self.received_ping = False # Have we received a ping from the other side self.state = ConnectionStates.CONNECTING @@ -165,7 +167,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver): self.pending_commands = [] # type: List[Command] # The LoopingCall for sending pings. - self._send_ping_loop = None + self._send_ping_loop = None # type: Optional[task.LoopingCall] # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 31082bb16a..5b0900aa3c 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -738,7 +738,7 @@ def _make_state_cache_entry( # failing that, look for the closest match. 
prev_group = None - delta_ids = None + delta_ids = None # type: Optional[StateMap[str]] for old_group, old_state in state_groups_ids.items(): n_delta_ids = {k: v for k, v in new_state.items() if old_state.get(k) != v} diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index f211ddbaf8..4bb2b9c28c 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -21,8 +21,8 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore -from synapse.storage.databases.main.events import encode_json from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.util.frozenutils import frozendict_json_encoder if TYPE_CHECKING: from synapse.server import HomeServer @@ -105,7 +105,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase and original_event.internal_metadata.is_redacted() ): # Redaction was allowed - pruned_json = encode_json( + pruned_json = frozendict_json_encoder.encode( prune_event_dict( original_event.room_version, original_event.get_dict() ) @@ -171,7 +171,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase return # Prune the event's dict then convert it to JSON. - pruned_json = encode_json( + pruned_json = frozendict_json_encoder.encode( prune_event_dict(event.room_version, event.get_dict()) ) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 18def01f50..78e645592f 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -52,16 +52,6 @@ event_counter = Counter( ) -def encode_json(json_object): - """ - Encode a Python object as JSON and return it in a Unicode string. - """ - out = frozendict_json_encoder.encode(json_object) - if isinstance(out, bytes): - out = out.decode("utf8") - return out - - _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event")) @@ -743,7 +733,9 @@ class PersistEventsStore: logger.exception("") raise - metadata_json = encode_json(event.internal_metadata.get_dict()) + metadata_json = frozendict_json_encoder.encode( + event.internal_metadata.get_dict() + ) sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?" 
txn.execute(sql, (metadata_json, event.event_id)) @@ -797,10 +789,10 @@ class PersistEventsStore: { "event_id": event.event_id, "room_id": event.room_id, - "internal_metadata": encode_json( + "internal_metadata": frozendict_json_encoder.encode( event.internal_metadata.get_dict() ), - "json": encode_json(event_dict(event)), + "json": frozendict_json_encoder.encode(event_dict(event)), "format_version": event.format_version, } for event, _ in events_and_contexts diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 37249f1e3f..1d27439536 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -546,7 +546,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): async def get_room_event_before_stream_ordering( self, room_id: str, stream_ordering: int - ) -> Tuple[int, int, str]: + ) -> Optional[Tuple[int, int, str]]: """Gets details of the first event in a room at or before a stream ordering Args: diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 02fbb656e8..ec356b2e4f 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -421,7 +421,7 @@ class MultiWriterIdGenerator: self._unfinished_ids.discard(next_id) self._finished_ids.add(next_id) - new_cur = None + new_cur = None # type: Optional[int] if self._unfinished_ids: # If there are unfinished IDs then the new position will be the From cc40a59b4a94534105667ae95fd5602ebdc57dce Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:14:56 +0100 Subject: [PATCH 083/134] 1.21.0 --- CHANGES.md | 102 +++++++++++++++++++++++++++++++++++++++ changelog.d/7124.bugfix | 1 - changelog.d/7796.bugfix | 1 - changelog.d/7905.bugfix | 1 - changelog.d/8004.feature | 1 - changelog.d/8208.misc | 1 - changelog.d/8216.misc | 1 - changelog.d/8217.feature | 1 - changelog.d/8227.doc | 1 - changelog.d/8230.bugfix | 1 - changelog.d/8236.bugfix | 1 - changelog.d/8243.misc | 1 - changelog.d/8247.bugfix | 1 - changelog.d/8248.feature | 1 - changelog.d/8250.misc | 1 - changelog.d/8256.misc | 1 - changelog.d/8257.misc | 1 - changelog.d/8258.bugfix | 1 - changelog.d/8259.misc | 1 - changelog.d/8260.misc | 1 - changelog.d/8261.misc | 1 - changelog.d/8262.bugfix | 1 - changelog.d/8265.bugfix | 1 - changelog.d/8268.bugfix | 1 - changelog.d/8272.bugfix | 1 - changelog.d/8275.feature | 1 - changelog.d/8278.bugfix | 1 - changelog.d/8279.misc | 1 - changelog.d/8281.misc | 1 - changelog.d/8282.misc | 1 - changelog.d/8287.bugfix | 1 - changelog.d/8288.misc | 1 - changelog.d/8294.feature | 1 - changelog.d/8296.misc | 1 - changelog.d/8305.feature | 1 - changelog.d/8306.feature | 1 - changelog.d/8317.feature | 1 - changelog.d/8320.feature | 1 - changelog.d/8322.bugfix | 1 - changelog.d/8324.bugfix | 1 - changelog.d/8326.misc | 1 - changelog.d/8329.bugfix | 1 - changelog.d/8330.misc | 1 - changelog.d/8331.misc | 1 - changelog.d/8335.misc | 1 - changelog.d/8337.misc | 1 - changelog.d/8344.misc | 1 - changelog.d/8345.feature | 1 - changelog.d/8353.bugfix | 1 - changelog.d/8354.misc | 1 - changelog.d/8362.bugfix | 1 - changelog.d/8364.bugfix | 2 - changelog.d/8370.misc | 1 - changelog.d/8371.misc | 1 - changelog.d/8372.misc | 1 - changelog.d/8373.bugfix | 1 - changelog.d/8374.bugfix | 1 - changelog.d/8375.doc | 1 - changelog.d/8377.misc | 1 - changelog.d/8383.misc | 1 - changelog.d/8385.bugfix | 1 - changelog.d/8386.bugfix | 1 - changelog.d/8387.feature | 1 - 
changelog.d/8388.misc | 1 - changelog.d/8396.feature | 1 - changelog.d/8398.bugfix | 1 - changelog.d/8399.misc | 1 - changelog.d/8400.bugfix | 1 - changelog.d/8401.misc | 1 - changelog.d/8402.misc | 1 - changelog.d/8404.misc | 1 - changelog.d/8405.feature | 1 - changelog.d/8406.feature | 1 - changelog.d/8410.bugfix | 1 - changelog.d/8413.feature | 1 - changelog.d/8414.bugfix | 1 - changelog.d/8415.doc | 1 - changelog.d/8417.feature | 1 - changelog.d/8419.feature | 1 - changelog.d/8420.feature | 1 - changelog.d/8422.misc | 1 - changelog.d/8423.misc | 1 - changelog.d/8425.feature | 1 - changelog.d/8426.removal | 1 - changelog.d/8427.misc | 1 - changelog.d/8430.feature | 1 - synapse/__init__.py | 2 +- 87 files changed, 103 insertions(+), 87 deletions(-) delete mode 100644 changelog.d/7124.bugfix delete mode 100644 changelog.d/7796.bugfix delete mode 100644 changelog.d/7905.bugfix delete mode 100644 changelog.d/8004.feature delete mode 100644 changelog.d/8208.misc delete mode 100644 changelog.d/8216.misc delete mode 100644 changelog.d/8217.feature delete mode 100644 changelog.d/8227.doc delete mode 100644 changelog.d/8230.bugfix delete mode 100644 changelog.d/8236.bugfix delete mode 100644 changelog.d/8243.misc delete mode 100644 changelog.d/8247.bugfix delete mode 100644 changelog.d/8248.feature delete mode 100644 changelog.d/8250.misc delete mode 100644 changelog.d/8256.misc delete mode 100644 changelog.d/8257.misc delete mode 100644 changelog.d/8258.bugfix delete mode 100644 changelog.d/8259.misc delete mode 100644 changelog.d/8260.misc delete mode 100644 changelog.d/8261.misc delete mode 100644 changelog.d/8262.bugfix delete mode 100644 changelog.d/8265.bugfix delete mode 100644 changelog.d/8268.bugfix delete mode 100644 changelog.d/8272.bugfix delete mode 100644 changelog.d/8275.feature delete mode 100644 changelog.d/8278.bugfix delete mode 100644 changelog.d/8279.misc delete mode 100644 changelog.d/8281.misc delete mode 100644 changelog.d/8282.misc delete mode 100644 changelog.d/8287.bugfix delete mode 100644 changelog.d/8288.misc delete mode 100644 changelog.d/8294.feature delete mode 100644 changelog.d/8296.misc delete mode 100644 changelog.d/8305.feature delete mode 100644 changelog.d/8306.feature delete mode 100644 changelog.d/8317.feature delete mode 100644 changelog.d/8320.feature delete mode 100644 changelog.d/8322.bugfix delete mode 100644 changelog.d/8324.bugfix delete mode 100644 changelog.d/8326.misc delete mode 100644 changelog.d/8329.bugfix delete mode 100644 changelog.d/8330.misc delete mode 100644 changelog.d/8331.misc delete mode 100644 changelog.d/8335.misc delete mode 100644 changelog.d/8337.misc delete mode 100644 changelog.d/8344.misc delete mode 100644 changelog.d/8345.feature delete mode 100644 changelog.d/8353.bugfix delete mode 100644 changelog.d/8354.misc delete mode 100644 changelog.d/8362.bugfix delete mode 100644 changelog.d/8364.bugfix delete mode 100644 changelog.d/8370.misc delete mode 100644 changelog.d/8371.misc delete mode 100644 changelog.d/8372.misc delete mode 100644 changelog.d/8373.bugfix delete mode 100644 changelog.d/8374.bugfix delete mode 100644 changelog.d/8375.doc delete mode 100644 changelog.d/8377.misc delete mode 100644 changelog.d/8383.misc delete mode 100644 changelog.d/8385.bugfix delete mode 100644 changelog.d/8386.bugfix delete mode 100644 changelog.d/8387.feature delete mode 100644 changelog.d/8388.misc delete mode 100644 changelog.d/8396.feature delete mode 100644 changelog.d/8398.bugfix delete mode 100644 changelog.d/8399.misc 
delete mode 100644 changelog.d/8400.bugfix delete mode 100644 changelog.d/8401.misc delete mode 100644 changelog.d/8402.misc delete mode 100644 changelog.d/8404.misc delete mode 100644 changelog.d/8405.feature delete mode 100644 changelog.d/8406.feature delete mode 100644 changelog.d/8410.bugfix delete mode 100644 changelog.d/8413.feature delete mode 100644 changelog.d/8414.bugfix delete mode 100644 changelog.d/8415.doc delete mode 100644 changelog.d/8417.feature delete mode 100644 changelog.d/8419.feature delete mode 100644 changelog.d/8420.feature delete mode 100644 changelog.d/8422.misc delete mode 100644 changelog.d/8423.misc delete mode 100644 changelog.d/8425.feature delete mode 100644 changelog.d/8426.removal delete mode 100644 changelog.d/8427.misc delete mode 100644 changelog.d/8430.feature diff --git a/CHANGES.md b/CHANGES.md index 5de819ea1e..38906ade49 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,105 @@ +Synapse 1.21.0 (2020-10-01) +=========================== + +Features +-------- + +- Require the user to confirm that their password should be reset after clicking the email confirmation link. ([\#8004](https://github.com/matrix-org/synapse/issues/8004)) +- Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel. ([\#8217](https://github.com/matrix-org/synapse/issues/8217)) +- Consolidate the SSO error template across all configuration. ([\#8248](https://github.com/matrix-org/synapse/issues/8248), [\#8405](https://github.com/matrix-org/synapse/issues/8405)) +- Add a configuration option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. ([\#8275](https://github.com/matrix-org/synapse/issues/8275), [\#8417](https://github.com/matrix-org/synapse/issues/8417)) +- Add experimental support for sharding event persister. ([\#8294](https://github.com/matrix-org/synapse/issues/8294), [\#8387](https://github.com/matrix-org/synapse/issues/8387), [\#8396](https://github.com/matrix-org/synapse/issues/8396), [\#8419](https://github.com/matrix-org/synapse/issues/8419)) +- Add the room topic and avatar to the room details admin API. ([\#8305](https://github.com/matrix-org/synapse/issues/8305)) +- Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. ([\#8306](https://github.com/matrix-org/synapse/issues/8306)) +- Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. ([\#8320](https://github.com/matrix-org/synapse/issues/8320)) +- Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. ([\#8345](https://github.com/matrix-org/synapse/issues/8345)) +- Add prometheus metrics for replication requests. ([\#8406](https://github.com/matrix-org/synapse/issues/8406)) +- Support passing additional single sign-on parameters to the client. ([\#8413](https://github.com/matrix-org/synapse/issues/8413)) +- Add experimental reporting of metrics on expensive rooms for state-resolution. ([\#8420](https://github.com/matrix-org/synapse/issues/8420)) +- Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. ([\#8425](https://github.com/matrix-org/synapse/issues/8425)) +- Add prometheus metrics to track federation delays. 
([\#8430](https://github.com/matrix-org/synapse/issues/8430)) + + +Bugfixes +-------- + +- Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. ([\#7124](https://github.com/matrix-org/synapse/issues/7124)) +- Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. ([\#7796](https://github.com/matrix-org/synapse/issues/7796)) +- Fix a longstanding bug when storing a media file with an empty `upload_name`. ([\#7905](https://github.com/matrix-org/synapse/issues/7905)) +- Fix messages not being sent over federation until an event is sent into the same room. ([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) +- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) +- Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix a unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) +- Fix logstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) +- Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) +- Fix a bug which caused the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278)) +- Fix edge case where push could get delayed for a user until a later event was pushed. ([\#8287](https://github.com/matrix-org/synapse/issues/8287)) +- Fix fetching malformed events from remote servers. ([\#8324](https://github.com/matrix-org/synapse/issues/8324)) +- Fix `UnboundLocalError` from occurring when appservices send a malformed register request. ([\#8329](https://github.com/matrix-org/synapse/issues/8329)) +- Don't send push notifications to expired user accounts. ([\#8353](https://github.com/matrix-org/synapse/issues/8353)) +- Fix a regression in v1.19.0 with reactivating users through the admin API. ([\#8362](https://github.com/matrix-org/synapse/issues/8362)) +- Fix a bug where during device registration the length of the device name wasn't limited. ([\#8364](https://github.com/matrix-org/synapse/issues/8364)) +- Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. ([\#8373](https://github.com/matrix-org/synapse/issues/8373)) +- Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. ([\#8374](https://github.com/matrix-org/synapse/issues/8374)) +- Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. ([\#8385](https://github.com/matrix-org/synapse/issues/8385)) +- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) +- Fix "Re-starting finished log context" warning when receiving an event we already had over federation.
([\#8398](https://github.com/matrix-org/synapse/issues/8398)) +- Fix incorrect handling of timeouts on outgoing HTTP requests. ([\#8400](https://github.com/matrix-org/synapse/issues/8400)) +- Fix a regression in v1.20.0 in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. ([\#8410](https://github.com/matrix-org/synapse/issues/8410)) +- Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. ([\#8414](https://github.com/matrix-org/synapse/issues/8414)) + + +Improved Documentation +---------------------- + +- Add `/_synapse/client` to the reverse proxy documentation. ([\#8227](https://github.com/matrix-org/synapse/issues/8227)) +- Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau). ([\#8375](https://github.com/matrix-org/synapse/issues/8375)) +- Improve description of `server_name` config option in `homeserver.yaml`. ([\#8415](https://github.com/matrix-org/synapse/issues/8415)) + + +Deprecations and Removals +------------------------- + +- Drop support for `prometheus_client` older than 0.4.0. ([\#8426](https://github.com/matrix-org/synapse/issues/8426)) + + +Internal Changes +---------------- + +- Fix tests on distros which disable TLSv1.0. Contributed by @danc86. ([\#8208](https://github.com/matrix-org/synapse/issues/8208)) +- Simplify the distributor code to avoid unnecessary work. ([\#8216](https://github.com/matrix-org/synapse/issues/8216)) +- Remove the `populate_stats_process_rooms_2` background job and restore functionality to `populate_stats_process_rooms`. ([\#8243](https://github.com/matrix-org/synapse/issues/8243)) +- Clean up type hints for `PaginationConfig`. ([\#8250](https://github.com/matrix-org/synapse/issues/8250), [\#8282](https://github.com/matrix-org/synapse/issues/8282)) +- Track the latest event for every destination and room for catch-up after federation outage. ([\#8256](https://github.com/matrix-org/synapse/issues/8256)) +- Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. ([\#8257](https://github.com/matrix-org/synapse/issues/8257)) +- Switch to the JSON implementation from the standard library. ([\#8259](https://github.com/matrix-org/synapse/issues/8259)) +- Add type hints to `synapse.util.async_helpers`. ([\#8260](https://github.com/matrix-org/synapse/issues/8260)) +- Simplify tests that mock asynchronous functions. ([\#8261](https://github.com/matrix-org/synapse/issues/8261)) +- Add type hints to `StreamToken` and `RoomStreamToken` classes. ([\#8279](https://github.com/matrix-org/synapse/issues/8279)) +- Change `StreamToken.room_key` to be a `RoomStreamToken` instance. ([\#8281](https://github.com/matrix-org/synapse/issues/8281)) +- Refactor notifier code to correctly use the max event stream position. ([\#8288](https://github.com/matrix-org/synapse/issues/8288)) +- Use slotted classes where possible. ([\#8296](https://github.com/matrix-org/synapse/issues/8296)) +- Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/). ([\#8317](https://github.com/matrix-org/synapse/issues/8317)) +- Update outdated usages of `metaclass` to python 3 syntax. ([\#8326](https://github.com/matrix-org/synapse/issues/8326)) +- Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this.
([\#8330](https://github.com/matrix-org/synapse/issues/8330), [\#8377](https://github.com/matrix-org/synapse/issues/8377)) +- Use the `admin_patterns` helper in additional locations. ([\#8331](https://github.com/matrix-org/synapse/issues/8331)) +- Fix test logging to allow braces in log output. ([\#8335](https://github.com/matrix-org/synapse/issues/8335)) +- Remove `__future__` imports related to Python 2 compatibility. ([\#8337](https://github.com/matrix-org/synapse/issues/8337)) +- Simplify `super()` calls to Python 3 syntax. ([\#8344](https://github.com/matrix-org/synapse/issues/8344)) +- Fix bad merge from `release-v1.20.0` branch to `develop`. ([\#8354](https://github.com/matrix-org/synapse/issues/8354)) +- Factor out a `_send_dummy_event_for_room` method. ([\#8370](https://github.com/matrix-org/synapse/issues/8370)) +- Improve logging of state resolution. ([\#8371](https://github.com/matrix-org/synapse/issues/8371)) +- Add type annotations to `SimpleHttpClient`. ([\#8372](https://github.com/matrix-org/synapse/issues/8372)) +- Refactor ID generators to use `async with` syntax. ([\#8383](https://github.com/matrix-org/synapse/issues/8383)) +- Add `EventStreamPosition` type. ([\#8388](https://github.com/matrix-org/synapse/issues/8388)) +- Create a mechanism for marking tests "logcontext clean". ([\#8399](https://github.com/matrix-org/synapse/issues/8399)) +- A pair of tiny cleanups in the federation request code. ([\#8401](https://github.com/matrix-org/synapse/issues/8401)) +- Add checks on startup that PostgreSQL sequences are consistent with their associated tables. ([\#8402](https://github.com/matrix-org/synapse/issues/8402)) +- Do not include appservice users when calculating the total MAU for a server. ([\#8404](https://github.com/matrix-org/synapse/issues/8404)) +- Typing fixes for `synapse.handlers.federation`. ([\#8422](https://github.com/matrix-org/synapse/issues/8422)) +- Various refactors to simplify stream token handling. ([\#8423](https://github.com/matrix-org/synapse/issues/8423)) +- Make stream token serializing/deserializing async. ([\#8427](https://github.com/matrix-org/synapse/issues/8427)) + + Synapse 1.20.1 (2020-09-24) =========================== diff --git a/changelog.d/7124.bugfix b/changelog.d/7124.bugfix deleted file mode 100644 index 8fd177780d..0000000000 --- a/changelog.d/7124.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in the media repository where remote thumbnails with the same size but different crop methods would overwrite each other. Contributed by @deepbluev7. diff --git a/changelog.d/7796.bugfix b/changelog.d/7796.bugfix deleted file mode 100644 index 65e5eb42a2..0000000000 --- a/changelog.d/7796.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. diff --git a/changelog.d/7905.bugfix b/changelog.d/7905.bugfix deleted file mode 100644 index e60e624412..0000000000 --- a/changelog.d/7905.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug when storing a media file with an empty `upload_name`. diff --git a/changelog.d/8004.feature b/changelog.d/8004.feature deleted file mode 100644 index a91b75e0e0..0000000000 --- a/changelog.d/8004.feature +++ /dev/null @@ -1 +0,0 @@ -Require the user to confirm that their password should be reset after clicking the email confirmation link. 
\ No newline at end of file diff --git a/changelog.d/8208.misc b/changelog.d/8208.misc deleted file mode 100644 index e65da88c46..0000000000 --- a/changelog.d/8208.misc +++ /dev/null @@ -1 +0,0 @@ -Fix tests on distros which disable TLSv1.0. Contributed by @danc86. diff --git a/changelog.d/8216.misc b/changelog.d/8216.misc deleted file mode 100644 index b38911b0e5..0000000000 --- a/changelog.d/8216.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the distributor code to avoid unnecessary work. diff --git a/changelog.d/8217.feature b/changelog.d/8217.feature deleted file mode 100644 index 899cbf14ef..0000000000 --- a/changelog.d/8217.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API `GET /_synapse/admin/v1/event_reports` to read entries of table `event_reports`. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/8227.doc b/changelog.d/8227.doc deleted file mode 100644 index 4a43015a83..0000000000 --- a/changelog.d/8227.doc +++ /dev/null @@ -1 +0,0 @@ -Add `/_synapse/client` to the reverse proxy documentation. diff --git a/changelog.d/8230.bugfix b/changelog.d/8230.bugfix deleted file mode 100644 index 532d0e22fe..0000000000 --- a/changelog.d/8230.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8236.bugfix b/changelog.d/8236.bugfix deleted file mode 100644 index 6f04871015..0000000000 --- a/changelog.d/8236.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/changelog.d/8243.misc b/changelog.d/8243.misc deleted file mode 100644 index f7375d32d3..0000000000 --- a/changelog.d/8243.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the 'populate_stats_process_rooms_2' background job and restore functionality to 'populate_stats_process_rooms'. \ No newline at end of file diff --git a/changelog.d/8247.bugfix b/changelog.d/8247.bugfix deleted file mode 100644 index 532d0e22fe..0000000000 --- a/changelog.d/8247.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8248.feature b/changelog.d/8248.feature deleted file mode 100644 index f3c4a74bc7..0000000000 --- a/changelog.d/8248.feature +++ /dev/null @@ -1 +0,0 @@ -Consolidate the SSO error template across all configuration. diff --git a/changelog.d/8250.misc b/changelog.d/8250.misc deleted file mode 100644 index b6896a9300..0000000000 --- a/changelog.d/8250.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8256.misc b/changelog.d/8256.misc deleted file mode 100644 index bf0ba76730..0000000000 --- a/changelog.d/8256.misc +++ /dev/null @@ -1 +0,0 @@ -Track the latest event for every destination and room for catch-up after federation outage. diff --git a/changelog.d/8257.misc b/changelog.d/8257.misc deleted file mode 100644 index 47ac583eb4..0000000000 --- a/changelog.d/8257.misc +++ /dev/null @@ -1 +0,0 @@ -Fix non-user visible bug in implementation of `MultiWriterIdGenerator.get_current_token_for_writer`. diff --git a/changelog.d/8258.bugfix b/changelog.d/8258.bugfix deleted file mode 100644 index 532d0e22fe..0000000000 --- a/changelog.d/8258.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. 
diff --git a/changelog.d/8259.misc b/changelog.d/8259.misc deleted file mode 100644 index a26779a664..0000000000 --- a/changelog.d/8259.misc +++ /dev/null @@ -1 +0,0 @@ -Switch to the JSON implementation from the standard library. diff --git a/changelog.d/8260.misc b/changelog.d/8260.misc deleted file mode 100644 index 164eea8b59..0000000000 --- a/changelog.d/8260.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.util.async_helpers`. diff --git a/changelog.d/8261.misc b/changelog.d/8261.misc deleted file mode 100644 index bc91e9375c..0000000000 --- a/changelog.d/8261.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify tests that mock asynchronous functions. diff --git a/changelog.d/8262.bugfix b/changelog.d/8262.bugfix deleted file mode 100644 index 2b84927de3..0000000000 --- a/changelog.d/8262.bugfix +++ /dev/null @@ -1 +0,0 @@ -Upgrade canonicaljson to version 1.4.0 to fix an unicode encoding issue. diff --git a/changelog.d/8265.bugfix b/changelog.d/8265.bugfix deleted file mode 100644 index 981a836d21..0000000000 --- a/changelog.d/8265.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix logstanding bug which could lead to incomplete database upgrades on SQLite. diff --git a/changelog.d/8268.bugfix b/changelog.d/8268.bugfix deleted file mode 100644 index 4b15a60253..0000000000 --- a/changelog.d/8268.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. diff --git a/changelog.d/8272.bugfix b/changelog.d/8272.bugfix deleted file mode 100644 index 532d0e22fe..0000000000 --- a/changelog.d/8272.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8275.feature b/changelog.d/8275.feature deleted file mode 100644 index 17549c3df3..0000000000 --- a/changelog.d/8275.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/changelog.d/8278.bugfix b/changelog.d/8278.bugfix deleted file mode 100644 index 50e40ca2a9..0000000000 --- a/changelog.d/8278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. diff --git a/changelog.d/8279.misc b/changelog.d/8279.misc deleted file mode 100644 index 99f669001f..0000000000 --- a/changelog.d/8279.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `StreamToken` and `RoomStreamToken` classes. diff --git a/changelog.d/8281.misc b/changelog.d/8281.misc deleted file mode 100644 index 74357120a7..0000000000 --- a/changelog.d/8281.misc +++ /dev/null @@ -1 +0,0 @@ -Change `StreamToken.room_key` to be a `RoomStreamToken` instance. diff --git a/changelog.d/8282.misc b/changelog.d/8282.misc deleted file mode 100644 index b6896a9300..0000000000 --- a/changelog.d/8282.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up type hints for `PaginationConfig`. diff --git a/changelog.d/8287.bugfix b/changelog.d/8287.bugfix deleted file mode 100644 index 839781aa07..0000000000 --- a/changelog.d/8287.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix edge case where push could get delayed for a user until a later event was pushed. diff --git a/changelog.d/8288.misc b/changelog.d/8288.misc deleted file mode 100644 index c08a53a5ee..0000000000 --- a/changelog.d/8288.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor notifier code to correctly use the max event stream position. 
diff --git a/changelog.d/8294.feature b/changelog.d/8294.feature deleted file mode 100644 index b363e929ea..0000000000 --- a/changelog.d/8294.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8296.misc b/changelog.d/8296.misc deleted file mode 100644 index f593a5b347..0000000000 --- a/changelog.d/8296.misc +++ /dev/null @@ -1 +0,0 @@ -Use slotted classes where possible. diff --git a/changelog.d/8305.feature b/changelog.d/8305.feature deleted file mode 100644 index 862dfdf959..0000000000 --- a/changelog.d/8305.feature +++ /dev/null @@ -1 +0,0 @@ -Add the room topic and avatar to the room details admin API. diff --git a/changelog.d/8306.feature b/changelog.d/8306.feature deleted file mode 100644 index 5c23da4030..0000000000 --- a/changelog.d/8306.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API for querying rooms where a user is a member. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/8317.feature b/changelog.d/8317.feature deleted file mode 100644 index f9edda099c..0000000000 --- a/changelog.d/8317.feature +++ /dev/null @@ -1 +0,0 @@ -Support testing the local Synapse checkout against the [Complement homeserver test suite](https://github.com/matrix-org/complement/). \ No newline at end of file diff --git a/changelog.d/8320.feature b/changelog.d/8320.feature deleted file mode 100644 index 475a5fe62d..0000000000 --- a/changelog.d/8320.feature +++ /dev/null @@ -1 +0,0 @@ -Add `uk.half-shot.msc2778.login.application_service` login type to allow appservices to login. diff --git a/changelog.d/8322.bugfix b/changelog.d/8322.bugfix deleted file mode 100644 index 532d0e22fe..0000000000 --- a/changelog.d/8322.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix messages over federation being lost until an event is sent into the same room. diff --git a/changelog.d/8324.bugfix b/changelog.d/8324.bugfix deleted file mode 100644 index 32788a9284..0000000000 --- a/changelog.d/8324.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix fetching events from remote servers that are malformed. diff --git a/changelog.d/8326.misc b/changelog.d/8326.misc deleted file mode 100644 index 985d2c027a..0000000000 --- a/changelog.d/8326.misc +++ /dev/null @@ -1 +0,0 @@ -Update outdated usages of `metaclass` to python 3 syntax. \ No newline at end of file diff --git a/changelog.d/8329.bugfix b/changelog.d/8329.bugfix deleted file mode 100644 index 2f71f1f4b9..0000000000 --- a/changelog.d/8329.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix UnboundLocalError from occuring when appservices send malformed register request. \ No newline at end of file diff --git a/changelog.d/8330.misc b/changelog.d/8330.misc deleted file mode 100644 index fbfdd52473..0000000000 --- a/changelog.d/8330.misc +++ /dev/null @@ -1 +0,0 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8331.misc b/changelog.d/8331.misc deleted file mode 100644 index 0e1bae20ef..0000000000 --- a/changelog.d/8331.misc +++ /dev/null @@ -1 +0,0 @@ -Use the `admin_patterns` helper in additional locations. diff --git a/changelog.d/8335.misc b/changelog.d/8335.misc deleted file mode 100644 index 7e0a4c7d83..0000000000 --- a/changelog.d/8335.misc +++ /dev/null @@ -1 +0,0 @@ -Fix test logging to allow braces in log output. 
\ No newline at end of file diff --git a/changelog.d/8337.misc b/changelog.d/8337.misc deleted file mode 100644 index 4daf272204..0000000000 --- a/changelog.d/8337.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `__future__` imports related to Python 2 compatibility. \ No newline at end of file diff --git a/changelog.d/8344.misc b/changelog.d/8344.misc deleted file mode 100644 index 0b342d5137..0000000000 --- a/changelog.d/8344.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify `super()` calls to Python 3 syntax. diff --git a/changelog.d/8345.feature b/changelog.d/8345.feature deleted file mode 100644 index 4ee5b6a56e..0000000000 --- a/changelog.d/8345.feature +++ /dev/null @@ -1 +0,0 @@ -Add a configuration option that allows existing users to log in with OpenID Connect. Contributed by @BBBSnowball and @OmmyZhang. diff --git a/changelog.d/8353.bugfix b/changelog.d/8353.bugfix deleted file mode 100644 index 45fc0adb8d..0000000000 --- a/changelog.d/8353.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't send push notifications to expired user accounts. diff --git a/changelog.d/8354.misc b/changelog.d/8354.misc deleted file mode 100644 index 1d33cde2da..0000000000 --- a/changelog.d/8354.misc +++ /dev/null @@ -1 +0,0 @@ -Fix bad merge from `release-v1.20.0` branch to `develop`. diff --git a/changelog.d/8362.bugfix b/changelog.d/8362.bugfix deleted file mode 100644 index 4e50067c87..0000000000 --- a/changelog.d/8362.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a regression in v1.19.0 with reactivating users through the admin API. diff --git a/changelog.d/8364.bugfix b/changelog.d/8364.bugfix deleted file mode 100644 index 7b82cbc388..0000000000 --- a/changelog.d/8364.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug where during device registration the length of the device name wasn't -limited. diff --git a/changelog.d/8370.misc b/changelog.d/8370.misc deleted file mode 100644 index 1aaac1e0bf..0000000000 --- a/changelog.d/8370.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out a `_send_dummy_event_for_room` method. diff --git a/changelog.d/8371.misc b/changelog.d/8371.misc deleted file mode 100644 index 6a54a9496a..0000000000 --- a/changelog.d/8371.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging of state resolution. diff --git a/changelog.d/8372.misc b/changelog.d/8372.misc deleted file mode 100644 index a56e36de4b..0000000000 --- a/changelog.d/8372.misc +++ /dev/null @@ -1 +0,0 @@ -Add type annotations to `SimpleHttpClient`. diff --git a/changelog.d/8373.bugfix b/changelog.d/8373.bugfix deleted file mode 100644 index e9d66a2088..0000000000 --- a/changelog.d/8373.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. \ No newline at end of file diff --git a/changelog.d/8374.bugfix b/changelog.d/8374.bugfix deleted file mode 100644 index 155bc3404f..0000000000 --- a/changelog.d/8374.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. diff --git a/changelog.d/8375.doc b/changelog.d/8375.doc deleted file mode 100644 index d291fb92fa..0000000000 --- a/changelog.d/8375.doc +++ /dev/null @@ -1 +0,0 @@ -Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau). 
diff --git a/changelog.d/8377.misc b/changelog.d/8377.misc deleted file mode 100644 index fbfdd52473..0000000000 --- a/changelog.d/8377.misc +++ /dev/null @@ -1 +0,0 @@ -Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this. diff --git a/changelog.d/8383.misc b/changelog.d/8383.misc deleted file mode 100644 index cb8318bf57..0000000000 --- a/changelog.d/8383.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor ID generators to use `async with` syntax. diff --git a/changelog.d/8385.bugfix b/changelog.d/8385.bugfix deleted file mode 100644 index c42502a8e0..0000000000 --- a/changelog.d/8385.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. diff --git a/changelog.d/8386.bugfix b/changelog.d/8386.bugfix deleted file mode 100644 index 24983a1e95..0000000000 --- a/changelog.d/8386.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. diff --git a/changelog.d/8387.feature b/changelog.d/8387.feature deleted file mode 100644 index b363e929ea..0000000000 --- a/changelog.d/8387.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8388.misc b/changelog.d/8388.misc deleted file mode 100644 index aaaef88b66..0000000000 --- a/changelog.d/8388.misc +++ /dev/null @@ -1 +0,0 @@ -Add `EventStreamPosition` type. diff --git a/changelog.d/8396.feature b/changelog.d/8396.feature deleted file mode 100644 index b363e929ea..0000000000 --- a/changelog.d/8396.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8398.bugfix b/changelog.d/8398.bugfix deleted file mode 100644 index e432aeebf1..0000000000 --- a/changelog.d/8398.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "Re-starting finished log context" warning when receiving an event we already had over federation. diff --git a/changelog.d/8399.misc b/changelog.d/8399.misc deleted file mode 100644 index ce6e8123cf..0000000000 --- a/changelog.d/8399.misc +++ /dev/null @@ -1 +0,0 @@ -Create a mechanism for marking tests "logcontext clean". diff --git a/changelog.d/8400.bugfix b/changelog.d/8400.bugfix deleted file mode 100644 index 835658ba5e..0000000000 --- a/changelog.d/8400.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect handling of timeouts on outgoing HTTP requests. diff --git a/changelog.d/8401.misc b/changelog.d/8401.misc deleted file mode 100644 index 27fd7ab129..0000000000 --- a/changelog.d/8401.misc +++ /dev/null @@ -1 +0,0 @@ -A pair of tiny cleanups in the federation request code. diff --git a/changelog.d/8402.misc b/changelog.d/8402.misc deleted file mode 100644 index ad1804d207..0000000000 --- a/changelog.d/8402.misc +++ /dev/null @@ -1 +0,0 @@ -Add checks on startup that PostgreSQL sequences are consistent with their associated tables. diff --git a/changelog.d/8404.misc b/changelog.d/8404.misc deleted file mode 100644 index 7aadded6c1..0000000000 --- a/changelog.d/8404.misc +++ /dev/null @@ -1 +0,0 @@ -Do not include appservice users when calculating the total MAU for a server. diff --git a/changelog.d/8405.feature b/changelog.d/8405.feature deleted file mode 100644 index f3c4a74bc7..0000000000 --- a/changelog.d/8405.feature +++ /dev/null @@ -1 +0,0 @@ -Consolidate the SSO error template across all configuration. 
diff --git a/changelog.d/8406.feature b/changelog.d/8406.feature deleted file mode 100644 index 1c6472ae7e..0000000000 --- a/changelog.d/8406.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics for replication requests. diff --git a/changelog.d/8410.bugfix b/changelog.d/8410.bugfix deleted file mode 100644 index 1323ddc525..0000000000 --- a/changelog.d/8410.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a v1.20.0 regression in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. diff --git a/changelog.d/8413.feature b/changelog.d/8413.feature deleted file mode 100644 index abe40a901c..0000000000 --- a/changelog.d/8413.feature +++ /dev/null @@ -1 +0,0 @@ -Support passing additional single sign-on parameters to the client. diff --git a/changelog.d/8414.bugfix b/changelog.d/8414.bugfix deleted file mode 100644 index 315876e892..0000000000 --- a/changelog.d/8414.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary 3PID registration check when resetting password via an email address. Bug introduced in v0.34.0rc2. \ No newline at end of file diff --git a/changelog.d/8415.doc b/changelog.d/8415.doc deleted file mode 100644 index 28b5798533..0000000000 --- a/changelog.d/8415.doc +++ /dev/null @@ -1 +0,0 @@ -Improve description of `server_name` config option in `homserver.yaml`. \ No newline at end of file diff --git a/changelog.d/8417.feature b/changelog.d/8417.feature deleted file mode 100644 index 17549c3df3..0000000000 --- a/changelog.d/8417.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to specify a whitelist of domains that a user can be redirected to after validating their email or phone number. \ No newline at end of file diff --git a/changelog.d/8419.feature b/changelog.d/8419.feature deleted file mode 100644 index b363e929ea..0000000000 --- a/changelog.d/8419.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for sharding event persister. diff --git a/changelog.d/8420.feature b/changelog.d/8420.feature deleted file mode 100644 index 9d6849624d..0000000000 --- a/changelog.d/8420.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental reporting of metrics on expensive rooms for state-resolution. diff --git a/changelog.d/8422.misc b/changelog.d/8422.misc deleted file mode 100644 index 03fba120c6..0000000000 --- a/changelog.d/8422.misc +++ /dev/null @@ -1 +0,0 @@ -Typing fixes for `synapse.handlers.federation`. diff --git a/changelog.d/8423.misc b/changelog.d/8423.misc deleted file mode 100644 index 7260e3fa41..0000000000 --- a/changelog.d/8423.misc +++ /dev/null @@ -1 +0,0 @@ -Various refactors to simplify stream token handling. diff --git a/changelog.d/8425.feature b/changelog.d/8425.feature deleted file mode 100644 index b4ee5bb74b..0000000000 --- a/changelog.d/8425.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental prometheus metric to track numbers of "large" rooms for state resolutiom. diff --git a/changelog.d/8426.removal b/changelog.d/8426.removal deleted file mode 100644 index a56277fe7a..0000000000 --- a/changelog.d/8426.removal +++ /dev/null @@ -1 +0,0 @@ -Drop support for `prometheus_client` older than 0.4.0. diff --git a/changelog.d/8427.misc b/changelog.d/8427.misc deleted file mode 100644 index c9656b9112..0000000000 --- a/changelog.d/8427.misc +++ /dev/null @@ -1 +0,0 @@ -Make stream token serializing/deserializing async. 
diff --git a/changelog.d/8430.feature b/changelog.d/8430.feature deleted file mode 100644 index 1f31d42bc1..0000000000 --- a/changelog.d/8430.feature +++ /dev/null @@ -1 +0,0 @@ -Add prometheus metrics to track federation delays. diff --git a/synapse/__init__.py b/synapse/__init__.py index e40b582bd5..57f818125a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.20.1" +__version__ = "1.21.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From c501c80e467a0c7a2429633a5160a580195a8826 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:17:59 +0100 Subject: [PATCH 084/134] fix version number we're not doing a final release yet! --- CHANGES.md | 4 ++-- synapse/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 38906ade49..8368ac0b92 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,5 @@ -Synapse 1.21.0 (2020-10-01) -=========================== +Synapse 1.21.0rc1 (2020-10-01) +============================== Features -------- diff --git a/synapse/__init__.py b/synapse/__init__.py index 57f818125a..4706974508 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.21.0" +__version__ = "1.21.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 50e5174e8687ae3d368386dc020d869006cb6750 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:27:01 +0100 Subject: [PATCH 085/134] changelog fixes --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8368ac0b92..267909d3e9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -29,7 +29,7 @@ Bugfixes - Fix messages not being sent over federation until an event is sent into the same room. ([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) - Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) - Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix an unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) -- Fix logstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) +- Fix longstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) - Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) - Fix a bug which cause the logging system to report errors, if `DEBUG` was enabled and no `context` filter was applied. ([\#8278](https://github.com/matrix-org/synapse/issues/8278)) - Fix edge case where push could get delayed for a user until a later event was pushed. 
([\#8287](https://github.com/matrix-org/synapse/issues/8287)) @@ -41,7 +41,6 @@ Bugfixes - Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2. ([\#8373](https://github.com/matrix-org/synapse/issues/8373)) - Fix theoretical race condition where events are not sent down `/sync` if the synchrotron worker is restarted without restarting other workers. ([\#8374](https://github.com/matrix-org/synapse/issues/8374)) - Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite. ([\#8385](https://github.com/matrix-org/synapse/issues/8385)) -- Fix a bug introduced in v1.20.0 which caused the `synapse_port_db` script to fail. ([\#8386](https://github.com/matrix-org/synapse/issues/8386)) - Fix "Re-starting finished log context" warning when receiving an event we already had over federation. ([\#8398](https://github.com/matrix-org/synapse/issues/8398)) - Fix incorrect handling of timeouts on outgoing HTTP requests. ([\#8400](https://github.com/matrix-org/synapse/issues/8400)) - Fix a regression in v1.20.0 in the `synapse_port_db` script regarding the `ui_auth_sessions_ips` table. ([\#8410](https://github.com/matrix-org/synapse/issues/8410)) From b1f4e6e4fc3d0cf5e10d6a79ef89abdcc9e63e8c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 1 Oct 2020 13:34:24 +0100 Subject: [PATCH 086/134] fix a logging error in thumbnailer (#8435) Introduced in #8236 --- changelog.d/8435.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8435.bugfix diff --git a/changelog.d/8435.bugfix b/changelog.d/8435.bugfix new file mode 100644 index 0000000000..6f04871015 --- /dev/null +++ b/changelog.d/8435.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index ae6822d6e7..e1192b47cd 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -637,7 +637,7 @@ class MediaRepository: thumbnailer = Thumbnailer(input_path) except ThumbnailError as e: logger.warning( - "Unable to generate thumbnails for remote media %s from %s using a method of %s and type of %s: %s", + "Unable to generate thumbnails for remote media %s from %s of type %s: %s", media_id, server_name, media_type, e, ) From 2eb947e0eee7d66a77fc4a7e7af5234cacece3e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 1 Oct 2020 13:38:26 +0100 Subject: [PATCH 087/134] update changelog --- CHANGES.md | 2 +- changelog.d/8435.bugfix | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) delete mode 100644 changelog.d/8435.bugfix diff --git a/CHANGES.md b/CHANGES.md index 267909d3e9..29711c60ce 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -27,7 +27,7 @@ Bugfixes - Fix inconsistent handling of non-existent push rules, and stop tracking the `enabled` state of removed push rules. ([\#7796](https://github.com/matrix-org/synapse/issues/7796)) - Fix a longstanding bug when storing a media file with an empty `upload_name`. ([\#7905](https://github.com/matrix-org/synapse/issues/7905)) - Fix messages not being sent over federation until an event is sent into the same room.
([\#8230](https://github.com/matrix-org/synapse/issues/8230), [\#8247](https://github.com/matrix-org/synapse/issues/8247), [\#8258](https://github.com/matrix-org/synapse/issues/8258), [\#8272](https://github.com/matrix-org/synapse/issues/8272), [\#8322](https://github.com/matrix-org/synapse/issues/8322)) -- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236)) +- Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. ([\#8236](https://github.com/matrix-org/synapse/issues/8236), [\#8435](https://github.com/matrix-org/synapse/issues/8435)) - Upgrade minimum version of `canonicaljson` to version 1.4.0, to fix an unicode encoding issue. ([\#8262](https://github.com/matrix-org/synapse/issues/8262)) - Fix longstanding bug which could lead to incomplete database upgrades on SQLite. ([\#8265](https://github.com/matrix-org/synapse/issues/8265)) - Fix stack overflow when stderr is redirected to the logging system, and the logging system encounters an error. ([\#8268](https://github.com/matrix-org/synapse/issues/8268)) diff --git a/changelog.d/8435.bugfix b/changelog.d/8435.bugfix deleted file mode 100644 index 6f04871015..0000000000 --- a/changelog.d/8435.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a longstanding bug where files that could not be thumbnailed would result in an Internal Server Error. From 61aaf36a1cdaa0057d0f4d8784a8e126d5f3988a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 1 Oct 2020 13:38:20 -0400 Subject: [PATCH 088/134] Do not expose the experimental appservice login flow to clients. (#8440) --- changelog.d/8440.bugfix | 1 + synapse/rest/client/v1/login.py | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/8440.bugfix diff --git a/changelog.d/8440.bugfix b/changelog.d/8440.bugfix new file mode 100644 index 0000000000..84d5f541d1 --- /dev/null +++ b/changelog.d/8440.bugfix @@ -0,0 +1 @@ +Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index b9347b87c7..3d1693d7ac 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -111,8 +111,6 @@ class LoginRestServlet(RestServlet): ({"type": t} for t in self.auth_handler.get_supported_login_types()) ) - flows.append({"type": LoginRestServlet.APPSERVICE_TYPE}) - return 200, {"flows": flows} def on_OPTIONS(self, request: SynapseRequest): From 05ee048f2c9ce0bb8a7d2430b21ca3682ef5858b Mon Sep 17 00:00:00 2001 From: BBBSnowball Date: Thu, 1 Oct 2020 19:54:35 +0200 Subject: [PATCH 089/134] Add config option for always using "userinfo endpoint" for OIDC (#7658) This allows for connecting to certain IdPs, e.g. GitLab. --- changelog.d/7658.feature | 1 + docs/openid.md | 41 +++++++++++++++++++++++++------- docs/sample_config.yaml | 8 +++++++ synapse/config/oidc_config.py | 9 +++++++ synapse/handlers/oidc_handler.py | 11 +++++---- tests/handlers/test_oidc.py | 10 ++++++-- 6 files changed, 65 insertions(+), 15 deletions(-) create mode 100644 changelog.d/7658.feature diff --git a/changelog.d/7658.feature b/changelog.d/7658.feature new file mode 100644 index 0000000000..fbf345988d --- /dev/null +++ b/changelog.d/7658.feature @@ -0,0 +1 @@ +Add a configuration option for always using the "userinfo endpoint" for OpenID Connect. This fixes support for some identity providers, e.g. GitLab. 
Contributed by Benjamin Koch. diff --git a/docs/openid.md b/docs/openid.md index 70b37f858b..4873681999 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -238,13 +238,36 @@ Synapse config: ```yaml oidc_config: - enabled: true - issuer: "https://id.twitch.tv/oauth2/" - client_id: "your-client-id" # TO BE FILLED - client_secret: "your-client-secret" # TO BE FILLED - client_auth_method: "client_secret_post" - user_mapping_provider: - config: - localpart_template: '{{ user.preferred_username }}' - display_name_template: '{{ user.name }}' + enabled: true + issuer: "https://id.twitch.tv/oauth2/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: "client_secret_post" + user_mapping_provider: + config: + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" +``` + +### GitLab + +1. Create a [new application](https://gitlab.com/profile/applications). +2. Add the `read_user` and `openid` scopes. +3. Add this Callback URL: `[synapse public baseurl]/_synapse/oidc/callback` + +Synapse config: + +```yaml +oidc_config: + enabled: true + issuer: "https://gitlab.com/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: "client_secret_post" + scopes: ["openid", "read_user"] + user_profile_method: "userinfo_endpoint" + user_mapping_provider: + config: + localpart_template: '{{ user.nickname }}' + display_name_template: '{{ user.name }}' ``` diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8a3206e845..b2c1d7a737 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1714,6 +1714,14 @@ oidc_config: # #skip_verification: true + # Whether to fetch the user profile from the userinfo endpoint. Valid + # values are: "auto" or "userinfo_endpoint". + # + # Defaults to "auto", which fetches the userinfo endpoint if "openid" is included + # in `scopes`. Uncomment the following to always fetch the userinfo endpoint. + # + #user_profile_method: "userinfo_endpoint" + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead # of failing. This could be used if switching from password logins to OIDC. Defaults to false. # diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py index f924116819..7597fbc864 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc_config.py @@ -56,6 +56,7 @@ class OIDCConfig(Config): self.oidc_userinfo_endpoint = oidc_config.get("userinfo_endpoint") self.oidc_jwks_uri = oidc_config.get("jwks_uri") self.oidc_skip_verification = oidc_config.get("skip_verification", False) + self.oidc_user_profile_method = oidc_config.get("user_profile_method", "auto") self.oidc_allow_existing_users = oidc_config.get("allow_existing_users", False) ump_config = oidc_config.get("user_mapping_provider", {}) @@ -159,6 +160,14 @@ class OIDCConfig(Config): # #skip_verification: true + # Whether to fetch the user profile from the userinfo endpoint. Valid + # values are: "auto" or "userinfo_endpoint". + # + # Defaults to "auto", which fetches the userinfo endpoint if "openid" is included + # in `scopes`. Uncomment the following to always fetch the userinfo endpoint. + # + #user_profile_method: "userinfo_endpoint" + # Uncomment to allow a user logging in via OIDC to match a pre-existing account instead # of failing. This could be used if switching from password logins to OIDC. Defaults to false. 
# diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py index 19cd652675..05ac86e697 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc_handler.py @@ -96,6 +96,7 @@ class OidcHandler: self.hs = hs self._callback_url = hs.config.oidc_callback_url # type: str self._scopes = hs.config.oidc_scopes # type: List[str] + self._user_profile_method = hs.config.oidc_user_profile_method # type: str self._client_auth = ClientAuth( hs.config.oidc_client_id, hs.config.oidc_client_secret, @@ -196,11 +197,11 @@ class OidcHandler: % (m["response_types_supported"],) ) - # If the openid scope was not requested, we need a userinfo endpoint to fetch user infos + # Ensure there's a userinfo endpoint to fetch from if it is required. if self._uses_userinfo: if m.get("userinfo_endpoint") is None: raise ValueError( - 'provider has no "userinfo_endpoint", even though it is required because the "openid" scope is not requested' + 'provider has no "userinfo_endpoint", even though it is required' ) else: # If we're not using userinfo, we need a valid jwks to validate the ID token @@ -220,8 +221,10 @@ class OidcHandler: ``access_token`` with the ``userinfo_endpoint``. """ - # Maybe that should be user-configurable and not inferred? - return "openid" not in self._scopes + return ( + "openid" not in self._scopes + or self._user_profile_method == "userinfo_endpoint" + ) async def load_metadata(self) -> OpenIDProviderMetadata: """Load and validate the provider metadata. diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index d5087e58be..b6f436c016 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -286,9 +286,15 @@ class OidcHandlerTestCase(HomeserverTestCase): h._validate_metadata, ) - # Tests for configs that the userinfo endpoint + # Tests for configs that require the userinfo endpoint self.assertFalse(h._uses_userinfo) - h._scopes = [] # do not request the openid scope + self.assertEqual(h._user_profile_method, "auto") + h._user_profile_method = "userinfo_endpoint" + self.assertTrue(h._uses_userinfo) + + # Revert the profile method and do not request the "openid" scope. + h._user_profile_method = "auto" + h._scopes = [] self.assertTrue(h._uses_userinfo) self.assertRaisesRegex(ValueError, "userinfo_endpoint", h._validate_metadata) From 6c5d5e507e629cf57ae8c1034879e8ffaef33e9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 09:57:12 +0100 Subject: [PATCH 090/134] Add unit test for event persister sharding (#8433) --- changelog.d/8433.misc | 1 + mypy.ini | 3 + stubs/txredisapi.pyi | 20 +- synapse/replication/tcp/handler.py | 6 +- synapse/replication/tcp/redis.py | 40 +++- tests/replication/_base.py | 224 ++++++++++++++++-- .../test_sharded_event_persister.py | 102 ++++++++ tests/unittest.py | 2 +- 8 files changed, 371 insertions(+), 27 deletions(-) create mode 100644 changelog.d/8433.misc create mode 100644 tests/replication/test_sharded_event_persister.py diff --git a/changelog.d/8433.misc b/changelog.d/8433.misc new file mode 100644 index 0000000000..05f8b5bbf4 --- /dev/null +++ b/changelog.d/8433.misc @@ -0,0 +1 @@ +Add unit test for event persister sharding. 
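As background for the diffs that follow: the fake Redis server this patch adds to `tests/replication/_base.py` speaks the Redis serialization protocol (RESP). Inbound commands arrive as RESP arrays of bulk strings and are parsed with `hiredis` (which the test code below imports), while replies are hand-encoded as bulk strings (`$<len>`), integers (`:<val>`) and arrays (`*<len>`). The following sketch is illustrative only, not part of the patch; it assumes nothing beyond the `hiredis` package, and mirrors what `FakeRedisPubSubProtocol.encode` and `dataReceived` (below) do:

```python
# Illustrative sketch: encode a command the way the fake server does,
# then parse it back with hiredis.
import hiredis


def encode(obj) -> str:
    # Strings/bytes become RESP bulk strings, ints become RESP integers,
    # and lists/tuples become RESP arrays of recursively encoded items.
    if isinstance(obj, bytes):
        obj = obj.decode("utf-8")
    if isinstance(obj, str):
        return "$%d\r\n%s\r\n" % (len(obj), obj)
    if isinstance(obj, int):
        return ":%d\r\n" % (obj,)
    if isinstance(obj, (list, tuple)):
        return "*%d\r\n%s" % (len(obj), "".join(encode(a) for a in obj))
    raise TypeError("cannot encode %r" % (obj,))


reader = hiredis.Reader()
reader.feed(encode(["SUBSCRIBE", "test"]).encode("utf-8"))
assert reader.gets() == [b"SUBSCRIBE", b"test"]
assert reader.gets() is False  # no further complete message buffered
```

The same framing is why `dataReceived` below loops on `reader.gets()`: a single TCP packet may carry several complete commands.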
diff --git a/mypy.ini b/mypy.ini index c283f15b21..e84ad04e41 100644 --- a/mypy.ini +++ b/mypy.ini @@ -143,3 +143,6 @@ ignore_missing_imports = True [mypy-nacl.*] ignore_missing_imports = True + +[mypy-hiredis] +ignore_missing_imports = True diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi index c66413f003..522244bb57 100644 --- a/stubs/txredisapi.pyi +++ b/stubs/txredisapi.pyi @@ -16,7 +16,7 @@ """Contains *incomplete* type hints for txredisapi. """ -from typing import List, Optional, Union +from typing import List, Optional, Union, Type class RedisProtocol: def publish(self, channel: str, message: bytes): ... @@ -42,3 +42,21 @@ def lazyConnection( class SubscriberFactory: def buildProtocol(self, addr): ... + +class ConnectionHandler: ... + +class RedisFactory: + continueTrying: bool + handler: RedisProtocol + def __init__( + self, + uuid: str, + dbid: Optional[int], + poolsize: int, + isLazy: bool = False, + handler: Type = ConnectionHandler, + charset: str = "utf-8", + password: Optional[str] = None, + replyTimeout: Optional[int] = None, + convertNumbers: Optional[int] = True, + ): ... diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index b323841f73..e92da7b263 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -251,10 +251,9 @@ class ReplicationCommandHandler: using TCP. """ if hs.config.redis.redis_enabled: - import txredisapi - from synapse.replication.tcp.redis import ( RedisDirectTcpReplicationClientFactory, + lazyConnection, ) logger.info( @@ -271,7 +270,8 @@ class ReplicationCommandHandler: # connection after SUBSCRIBE is called). # First create the connection for sending commands. - outbound_redis_connection = txredisapi.lazyConnection( + outbound_redis_connection = lazyConnection( + reactor=hs.get_reactor(), host=hs.config.redis_host, port=hs.config.redis_port, password=hs.config.redis.redis_password, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index f225e533de..de19705c1f 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -15,7 +15,7 @@ import logging from inspect import isawaitable -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import txredisapi @@ -228,3 +228,41 @@ class RedisDirectTcpReplicationClientFactory(txredisapi.SubscriberFactory): p.password = self.password return p + + +def lazyConnection( + reactor, + host: str = "localhost", + port: int = 6379, + dbid: Optional[int] = None, + reconnect: bool = True, + charset: str = "utf-8", + password: Optional[str] = None, + connectTimeout: Optional[int] = None, + replyTimeout: Optional[int] = None, + convertNumbers: bool = True, +) -> txredisapi.RedisProtocol: + """Equivalent to `txredisapi.lazyConnection`, except allows specifying a + reactor. + """ + + isLazy = True + poolsize = 1 + + uuid = "%s:%d" % (host, port) + factory = txredisapi.RedisFactory( + uuid, + dbid, + poolsize, + isLazy, + txredisapi.ConnectionHandler, + charset, + password, + replyTimeout, + convertNumbers, + ) + factory.continueTrying = reconnect + for x in range(poolsize): + reactor.connectTCP(host, port, factory, connectTimeout) + + return factory.handler diff --git a/tests/replication/_base.py b/tests/replication/_base.py index ae60874ec3..81ea985b9f 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -12,13 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. - import logging from typing import Any, Callable, List, Optional, Tuple import attr +import hiredis from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime +from twisted.internet.protocol import Protocol from twisted.internet.task import LoopingCall from twisted.web.http import HTTPChannel @@ -27,7 +28,7 @@ from synapse.app.generic_worker import ( GenericWorkerServer, ) from synapse.http.server import JsonResource -from synapse.http.site import SynapseRequest +from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource, streams from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol @@ -197,19 +198,37 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = self.hs.get_replication_streamer() + # Fake in memory Redis server that servers can connect to. + self._redis_server = FakeRedisPubSubServer() + store = self.hs.get_datastore() self.database_pool = store.db_pool self.reactor.lookups["testserv"] = "1.2.3.4" + self.reactor.lookups["localhost"] = "127.0.0.1" - self._worker_hs_to_resource = {} + # A map from a HS instance to the associated HTTP Site to use for + # handling inbound HTTP requests to that instance. + self._hs_to_site = {self.hs: self.site} + + if self.hs.config.redis.redis_enabled: + # Handle attempts to connect to fake redis server. + self.reactor.add_tcp_client_callback( + "localhost", 6379, self.connect_any_redis_attempts, + ) + + self.hs.get_tcp_replication().start_replication(self.hs) # When we see a connection attempt to the master replication listener we # automatically set up the connection. This is so that tests don't # manually have to go and explicitly set it up each time (plus sometimes # it is impossible to write the handling explicitly in the tests). + # + # Register the master replication listener: self.reactor.add_tcp_client_callback( - "1.2.3.4", 8765, self._handle_http_replication_attempt + "1.2.3.4", + 8765, + lambda: self._handle_http_replication_attempt(self.hs, 8765), ) def create_test_json_resource(self): @@ -253,28 +272,63 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): **kwargs ) + # If the instance is in the `instance_map` config then workers may try + # and send HTTP requests to it, so we register it with + # `_handle_http_replication_attempt` like we do with the master HS. + instance_name = worker_hs.get_instance_name() + instance_loc = worker_hs.config.worker.instance_map.get(instance_name) + if instance_loc: + # Ensure the host is one that has a fake DNS entry. 
+ if instance_loc.host not in self.reactor.lookups: + raise Exception( + "Host does not have an IP for instance_map[%r].host = %r" + % (instance_name, instance_loc.host,) + ) + + self.reactor.add_tcp_client_callback( + self.reactor.lookups[instance_loc.host], + instance_loc.port, + lambda: self._handle_http_replication_attempt( + worker_hs, instance_loc.port + ), + ) + store = worker_hs.get_datastore() store.db_pool._db_pool = self.database_pool._db_pool - repl_handler = ReplicationCommandHandler(worker_hs) - client = ClientReplicationStreamProtocol( - worker_hs, "client", "test", self.clock, repl_handler, - ) - server = self.server_factory.buildProtocol(None) + # Set up TCP replication between master and the new worker if we don't + # have Redis support enabled. + if not worker_hs.config.redis_enabled: + repl_handler = ReplicationCommandHandler(worker_hs) + client = ClientReplicationStreamProtocol( + worker_hs, "client", "test", self.clock, repl_handler, + ) + server = self.server_factory.buildProtocol(None) - client_transport = FakeTransport(server, self.reactor) - client.makeConnection(client_transport) + client_transport = FakeTransport(server, self.reactor) + client.makeConnection(client_transport) - server_transport = FakeTransport(client, self.reactor) - server.makeConnection(server_transport) + server_transport = FakeTransport(client, self.reactor) + server.makeConnection(server_transport) # Set up a resource for the worker - resource = ReplicationRestResource(self.hs) + resource = ReplicationRestResource(worker_hs) for servlet in self.servlets: servlet(worker_hs, resource) - self._worker_hs_to_resource[worker_hs] = resource + self._hs_to_site[worker_hs] = SynapseSite( + logger_name="synapse.access.http.fake", + site_tag="{}-{}".format( + worker_hs.config.server.server_name, worker_hs.get_instance_name() + ), + config=worker_hs.config.server.listeners[0], + resource=resource, + server_version_string="1", + ) + + if worker_hs.config.redis.redis_enabled: + worker_hs.get_tcp_replication().start_replication(worker_hs) return worker_hs @@ -285,7 +339,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): return config def render_on_worker(self, worker_hs: HomeServer, request: SynapseRequest): - render(request, self._worker_hs_to_resource[worker_hs], self.reactor) + render(request, self._hs_to_site[worker_hs].resource, self.reactor) def replicate(self): """Tell the master side of replication that something has happened, and then @@ -294,9 +348,9 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.streamer.on_notifier_poke() self.pump() - def _handle_http_replication_attempt(self): - """Handles a connection attempt to the master replication HTTP - listener. + def _handle_http_replication_attempt(self, hs, repl_port): + """Handles a connection attempt to the given HS replication HTTP + listener on the given port. 
""" # We should have at least one outbound connection attempt, where the @@ -305,7 +359,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): self.assertGreaterEqual(len(clients), 1) (host, port, client_factory, _timeout, _bindAddress) = clients.pop() self.assertEqual(host, "1.2.3.4") - self.assertEqual(port, 8765) + self.assertEqual(port, repl_port) # Set up client side protocol client_protocol = client_factory.buildProtocol(None) @@ -315,7 +369,7 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # Set up the server side protocol channel = _PushHTTPChannel(self.reactor) channel.requestFactory = request_factory - channel.site = self.site + channel.site = self._hs_to_site[hs] # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -333,6 +387,32 @@ class BaseMultiWorkerStreamTestCase(unittest.HomeserverTestCase): # inside `connecTCP` before the connection has been passed back to the # code that requested the TCP connection. + def connect_any_redis_attempts(self): + """If redis is enabled we need to deal with workers connecting to a + redis server. We don't want to use a real Redis server so we use a + fake one. + """ + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0) + self.assertEqual(host, "localhost") + self.assertEqual(port, 6379) + + client_protocol = client_factory.buildProtocol(None) + server_protocol = self._redis_server.buildProtocol(None) + + client_to_server_transport = FakeTransport( + server_protocol, self.reactor, client_protocol + ) + client_protocol.makeConnection(client_to_server_transport) + + server_to_client_transport = FakeTransport( + client_protocol, self.reactor, server_protocol + ) + server_protocol.makeConnection(server_to_client_transport) + + return client_to_server_transport, server_to_client_transport + class TestReplicationDataHandler(GenericWorkerReplicationHandler): """Drop-in for ReplicationDataHandler which just collects RDATA rows""" @@ -467,3 +547,105 @@ class _PullToPushProducer: pass self.stopProducing() + + +class FakeRedisPubSubServer: + """A fake Redis server for pub/sub. + """ + + def __init__(self): + self._subscribers = set() + + def add_subscriber(self, conn): + """A connection has called SUBSCRIBE + """ + self._subscribers.add(conn) + + def remove_subscriber(self, conn): + """A connection has called UNSUBSCRIBE + """ + self._subscribers.discard(conn) + + def publish(self, conn, channel, msg) -> int: + """A connection want to publish a message to subscribers. + """ + for sub in self._subscribers: + sub.send(["message", channel, msg]) + + return len(self._subscribers) + + def buildProtocol(self, addr): + return FakeRedisPubSubProtocol(self) + + +class FakeRedisPubSubProtocol(Protocol): + """A connection from a client talking to the fake Redis server. + """ + + def __init__(self, server: FakeRedisPubSubServer): + self._server = server + self._reader = hiredis.Reader() + + def dataReceived(self, data): + self._reader.feed(data) + + # We might get multiple messages in one packet. + while True: + msg = self._reader.gets() + + if msg is False: + # No more messages. + return + + if not isinstance(msg, list): + # Inbound commands should always be a list + raise Exception("Expected redis list") + + self.handle_command(msg[0], *msg[1:]) + + def handle_command(self, command, *args): + """Received a Redis command from the client. + """ + + # We currently only support pub/sub. 
+        if command == b"PUBLISH":
+            channel, message = args
+            num_subscribers = self._server.publish(self, channel, message)
+            self.send(num_subscribers)
+        elif command == b"SUBSCRIBE":
+            (channel,) = args
+            self._server.add_subscriber(self)
+            self.send(["subscribe", channel, 1])
+        else:
+            raise Exception("Unknown command")
+
+    def send(self, msg):
+        """Send a message back to the client.
+        """
+        raw = self.encode(msg).encode("utf-8")
+
+        self.transport.write(raw)
+        self.transport.flush()
+
+    def encode(self, obj):
+        """Encode an object to its Redis format.
+
+        Supports: strings/bytes, integers and list/tuples.
+        """
+
+        if isinstance(obj, bytes):
+            # We assume bytes are just unicode strings.
+            obj = obj.decode("utf-8")
+
+        if isinstance(obj, str):
+            return "${len}\r\n{str}\r\n".format(len=len(obj), str=obj)
+        if isinstance(obj, int):
+            return ":{val}\r\n".format(val=obj)
+        if isinstance(obj, (list, tuple)):
+            items = "".join(self.encode(a) for a in obj)
+            return "*{len}\r\n{items}".format(len=len(obj), items=items)
+
+        raise Exception(
+            "Unrecognized type for encoding redis: %r: %r" % (type(obj), obj)
+        )
+
+    def connectionLost(self, reason):
+        self._server.remove_subscriber(self)

diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
new file mode 100644
index 0000000000..6068d14905
--- /dev/null
+++ b/tests/replication/test_sharded_event_persister.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.rest import admin
+from synapse.rest.client.v1 import login, room
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+from tests.utils import USE_POSTGRES_FOR_TESTS
+
+logger = logging.getLogger(__name__)
+
+
+class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase):
+    """Checks that event persister sharding works.
+    """
+
+    # Event persister sharding requires postgres (due to needing
+    # `MultiWriterIdGenerator`).
+    if not USE_POSTGRES_FOR_TESTS:
+        skip = "Requires Postgres"
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        # Register a user who sends a message that we'll get notified about
+        self.other_user_id = self.register_user("otheruser", "pass")
+        self.other_access_token = self.login("otheruser", "pass")
+
+    def default_config(self):
+        conf = super().default_config()
+        conf["redis"] = {"enabled": "true"}
+        conf["stream_writers"] = {"events": ["worker1", "worker2"]}
+        conf["instance_map"] = {
+            "worker1": {"host": "testserv", "port": 1001},
+            "worker2": {"host": "testserv", "port": 1002},
+        }
+        return conf
+
+    def test_basic(self):
+        """Simple test to ensure that multiple rooms can be created and joined,
+        and that different rooms get handled by different instances.
+ """ + + self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker1"}, + ) + + self.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "worker2"}, + ) + + persisted_on_1 = False + persisted_on_2 = False + + store = self.hs.get_datastore() + + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Keep making new rooms until we see rooms being persisted on both + # workers. + for _ in range(10): + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join( + room=room, user=self.other_user_id, tok=self.other_access_token + ) + + # The other user sends some messages + rseponse = self.helper.send(room, body="Hi!", tok=self.other_access_token) + event_id = rseponse["event_id"] + + # The event position includes which instance persisted the event. + pos = self.get_success(store.get_position_for_event(event_id)) + + persisted_on_1 |= pos.instance_name == "worker1" + persisted_on_2 |= pos.instance_name == "worker2" + + if persisted_on_1 and persisted_on_2: + break + + self.assertTrue(persisted_on_1) + self.assertTrue(persisted_on_2) diff --git a/tests/unittest.py b/tests/unittest.py index e654c0442d..82ede9de34 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -241,7 +241,7 @@ class HomeserverTestCase(TestCase): # create a site to wrap the resource. self.site = SynapseSite( logger_name="synapse.access.http.fake", - site_tag="test", + site_tag=self.hs.config.server.server_name, config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", From 3bd3707cb9615b5a9f7f7449ebe3ec495017ee9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 2 Oct 2020 11:05:29 +0100 Subject: [PATCH 091/134] Fix malformed log line in new federation "catch up" logic (#8442) --- changelog.d/8442.bugfix | 1 + synapse/federation/sender/per_destination_queue.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8442.bugfix diff --git a/changelog.d/8442.bugfix b/changelog.d/8442.bugfix new file mode 100644 index 0000000000..6f779a1de5 --- /dev/null +++ b/changelog.d/8442.bugfix @@ -0,0 +1 @@ +Fix malformed log line in new federation "catch up" logic. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 2657767fd1..bc99af3fdd 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -490,7 +490,7 @@ class PerDestinationQueue: ) if logger.isEnabledFor(logging.INFO): - rooms = (p.room_id for p in catchup_pdus) + rooms = [p.room_id for p in catchup_pdus] logger.info("Catching up rooms to %s: %r", self._destination, rooms) success = await self._transaction_manager.send_new_transaction( From 34ff8da83b54024289f515c6d73e6b486574d699 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 06:15:53 -0400 Subject: [PATCH 092/134] Convert additional templates to Jinja (#8444) This converts a few more of our inline HTML templates to Jinja. This is somewhat part of #7280 and should make it a bit easier to customize these in the future. 
--- changelog.d/8444.bugfix | 1 + synapse/config/_base.py | 11 +- synapse/config/captcha.py | 3 + synapse/config/consent_config.py | 2 + synapse/config/registration.py | 5 + synapse/res/templates/auth_success.html | 21 ++++ synapse/res/templates/recaptcha.html | 38 +++++++ synapse/res/templates/terms.html | 20 ++++ synapse/rest/client/v2_alpha/auth.py | 136 +++++------------------- 9 files changed, 121 insertions(+), 116 deletions(-) create mode 100644 changelog.d/8444.bugfix create mode 100644 synapse/res/templates/auth_success.html create mode 100644 synapse/res/templates/recaptcha.html create mode 100644 synapse/res/templates/terms.html diff --git a/changelog.d/8444.bugfix b/changelog.d/8444.bugfix new file mode 100644 index 0000000000..30c4328d4b --- /dev/null +++ b/changelog.d/8444.bugfix @@ -0,0 +1 @@ +Convert additional templates from inline HTML to Jinja2 templates. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 05a66841c3..85f65da4d9 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -242,12 +242,11 @@ class Config: env = jinja2.Environment(loader=loader, autoescape=autoescape) # Update the environment with our custom filters - env.filters.update( - { - "format_ts": _format_ts_filter, - "mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl), - } - ) + env.filters.update({"format_ts": _format_ts_filter}) + if self.public_baseurl: + env.filters.update( + {"mxc_to_http": _create_mxc_to_http_filter(self.public_baseurl)} + ) for filename in filenames: # Load the template diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 82f04d7966..cb00958165 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -28,6 +28,9 @@ class CaptchaConfig(Config): "recaptcha_siteverify_api", "https://www.recaptcha.net/recaptcha/api/siteverify", ) + self.recaptcha_template = self.read_templates( + ["recaptcha.html"], autoescape=True + )[0] def generate_config_section(self, **kwargs): return """\ diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py index fbddebeeab..6efa59b110 100644 --- a/synapse/config/consent_config.py +++ b/synapse/config/consent_config.py @@ -89,6 +89,8 @@ class ConsentConfig(Config): def read_config(self, config, **kwargs): consent_config = config.get("user_consent") + self.terms_template = self.read_templates(["terms.html"], autoescape=True)[0] + if consent_config is None: return self.user_consent_version = str(consent_config["version"]) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 5ffbb934fe..d7e3690a32 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -187,6 +187,11 @@ class RegistrationConfig(Config): session_lifetime = self.parse_duration(session_lifetime) self.session_lifetime = session_lifetime + # The success template used during fallback auth. + self.fallback_success_template = self.read_templates( + ["auth_success.html"], autoescape=True + )[0] + def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( diff --git a/synapse/res/templates/auth_success.html b/synapse/res/templates/auth_success.html new file mode 100644 index 0000000000..baf4633142 --- /dev/null +++ b/synapse/res/templates/auth_success.html @@ -0,0 +1,21 @@ + + +Success! + + + + + +
+</head>
+<body>
+<div>
+    <p>Thank you</p>
+    <p>You may now close this window and return to the application</p>
+</div>
+</body>
+</html>
+ + diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html new file mode 100644 index 0000000000..63944dc608 --- /dev/null +++ b/synapse/res/templates/recaptcha.html @@ -0,0 +1,38 @@ + + +Authentication + + + + + + + +
+</head>
+<body>
+<form id="registrationForm" method="post" action="{{ myurl }}">
+    <div>
+        <p>
+        Hello! We need to prevent computer programs and other automated
+        things from creating accounts on this server.
+        </p>
+        <p>
+        Please verify that you're not a robot.
+        </p>
+        <input type="hidden" name="session" value="{{ session }}" />
+        <div class="g-recaptcha"
+            data-sitekey="{{ sitekey }}"
+            data-callback="captchaDone">
+        </div>
+        <noscript>
+        <input type="submit" value="All Done" />
+        </noscript>
+    </div>
+</form>
+</body>
+</html>
+ + diff --git a/synapse/res/templates/terms.html b/synapse/res/templates/terms.html new file mode 100644 index 0000000000..dfef9897ee --- /dev/null +++ b/synapse/res/templates/terms.html @@ -0,0 +1,20 @@ + + +Authentication + + + + +
+</head>
+<body>
+<form id="registrationForm" method="post" action="{{ myurl }}">
+    <div>
+        <p>
+            Please click the button below if you agree to the
+            <a href="{{ terms_url }}">privacy policy</a> of this homeserver.
+        </p>
+        <input type="hidden" name="session" value="{{ session }}" />
+        <input type="submit" value="Agree" />
+    </div>
+</form>
+</body>
+</html>
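The division of template variables matches the render() calls in the
auth.py hunk that follows: recaptcha.html consumes session, myurl and
sitekey; terms.html consumes session, myurl and terms_url; and
auth_success.html takes no variables at all. A hedged usage sketch (the
`config` object stands in for the loaded homeserver config, and the
literal values are placeholders):

    # Each attribute is a jinja2.Template, loaded once at config-parse time.
    recaptcha_html = config.recaptcha_template.render(
        session="<session id>",
        myurl="/_matrix/client/r0/auth/m.login.recaptcha/fallback/web",
        sitekey="<recaptcha public key>",
    )
    terms_html = config.terms_template.render(
        session="<session id>",
        myurl="/_matrix/client/r0/auth/m.login.terms/fallback/web",
        terms_url="https://example.com/_matrix/consent?v=1.0",
    )
    success_html = config.fallback_success_template.render()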
+ + diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 097538f968..5fbfae5991 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -25,94 +25,6 @@ from ._base import client_patterns logger = logging.getLogger(__name__) -RECAPTCHA_TEMPLATE = """ - - -Authentication - - - - - - - -
-</head>
-<body>
-<form id="registrationForm" method="post" action="%(myurl)s">
-    <div>
-        <p>
-        Hello! We need to prevent computer programs and other automated
-        things from creating accounts on this server.
-        </p>
-        <p>
-        Please verify that you're not a robot.
-        </p>
-        <input type="hidden" name="session" value="%(session)s" />
-        <div class="g-recaptcha"
-            data-sitekey="%(sitekey)s"
-            data-callback="captchaDone">
-        </div>
-        <noscript>
-        <input type="submit" value="All Done" />
-        </noscript>
-    </div>
-</form>
-</body>
- - -""" - -TERMS_TEMPLATE = """ - - -Authentication - - - - -
-</head>
-<body>
-<form id="registrationForm" method="post" action="%(myurl)s">
-    <div>
-        <p>
-            Please click the button below if you agree to the
-            <a href="%(terms_url)s">privacy policy</a> of this homeserver.
-        </p>
-        <input type="hidden" name="session" value="%(session)s" />
-        <input type="submit" value="Agree" />
-    </div>
-</form>
-</body>
- - -""" - -SUCCESS_TEMPLATE = """ - - -Success! - - - - - -
-</head>
-<body>
-<div>
-    <p>Thank you</p>
-    <p>You may now close this window and return to the application</p>
-</div>
-</body>
- - -""" - class AuthRestServlet(RestServlet): """ @@ -145,26 +57,30 @@ class AuthRestServlet(RestServlet): self._cas_server_url = hs.config.cas_server_url self._cas_service_url = hs.config.cas_service_url + self.recaptcha_template = hs.config.recaptcha_template + self.terms_template = hs.config.terms_template + self.success_template = hs.config.fallback_success_template + async def on_GET(self, request, stagetype): session = parse_string(request, "session") if not session: raise SynapseError(400, "No session supplied") if stagetype == LoginType.RECAPTCHA: - html = RECAPTCHA_TEMPLATE % { - "session": session, - "myurl": "%s/r0/auth/%s/fallback/web" + html = self.recaptcha_template.render( + session=session, + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), - "sitekey": self.hs.config.recaptcha_public_key, - } + sitekey=self.hs.config.recaptcha_public_key, + ) elif stagetype == LoginType.TERMS: - html = TERMS_TEMPLATE % { - "session": session, - "terms_url": "%s_matrix/consent?v=%s" + html = self.terms_template.render( + session=session, + terms_url="%s_matrix/consent?v=%s" % (self.hs.config.public_baseurl, self.hs.config.user_consent_version), - "myurl": "%s/r0/auth/%s/fallback/web" + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), - } + ) elif stagetype == LoginType.SSO: # Display a confirmation page which prompts the user to @@ -222,14 +138,14 @@ class AuthRestServlet(RestServlet): ) if success: - html = SUCCESS_TEMPLATE + html = self.success_template.render() else: - html = RECAPTCHA_TEMPLATE % { - "session": session, - "myurl": "%s/r0/auth/%s/fallback/web" + html = self.recaptcha_template.render( + session=session, + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.RECAPTCHA), - "sitekey": self.hs.config.recaptcha_public_key, - } + sitekey=self.hs.config.recaptcha_public_key, + ) elif stagetype == LoginType.TERMS: authdict = {"session": session} @@ -238,18 +154,18 @@ class AuthRestServlet(RestServlet): ) if success: - html = SUCCESS_TEMPLATE + html = self.success_template.render() else: - html = TERMS_TEMPLATE % { - "session": session, - "terms_url": "%s_matrix/consent?v=%s" + html = self.terms_template.render( + session=session, + terms_url="%s_matrix/consent?v=%s" % ( self.hs.config.public_baseurl, self.hs.config.user_consent_version, ), - "myurl": "%s/r0/auth/%s/fallback/web" + myurl="%s/r0/auth/%s/fallback/web" % (CLIENT_API_PREFIX, LoginType.TERMS), - } + ) elif stagetype == LoginType.SSO: # The SSO fallback workflow should not post here, raise SynapseError(404, "Fallback SSO auth does not support POST requests.") From 695240d34a9dd1c34379ded1fbbbe42a1850549e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 12:22:19 +0100 Subject: [PATCH 093/134] Fix DB query on startup for negative streams. (#8447) For negative streams we have to negate the internal stream ID before querying the DB. The effect of this bug was to query far too many rows, slowing start up time, but we would correctly filter the results afterwards so there was no ill effect. --- changelog.d/8447.bugfix | 1 + synapse/storage/util/id_generators.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8447.bugfix diff --git a/changelog.d/8447.bugfix b/changelog.d/8447.bugfix new file mode 100644 index 0000000000..88edaf322e --- /dev/null +++ b/changelog.d/8447.bugfix @@ -0,0 +1 @@ +Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 02fbb656e8..48efbb5067 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -341,7 +341,7 @@ class MultiWriterIdGenerator: "cmp": "<=" if self._positive else ">=", } sql = self._db.engine.convert_param_style(sql) - cur.execute(sql, (min_stream_id,)) + cur.execute(sql, (min_stream_id * self._return_factor,)) self._persisted_upto_position = min_stream_id From 3bd2a2cbb1adffdbd0783ec58e88511cb4e90735 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 07:24:07 -0400 Subject: [PATCH 094/134] Include a public_baseurl in configs generated by the demo script. (#8443) --- changelog.d/8443.misc | 1 + demo/start.sh | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/8443.misc diff --git a/changelog.d/8443.misc b/changelog.d/8443.misc new file mode 100644 index 0000000000..633598e6b3 --- /dev/null +++ b/changelog.d/8443.misc @@ -0,0 +1 @@ +Configure `public_baseurl` when using demo scripts. diff --git a/demo/start.sh b/demo/start.sh index 83396e5c33..f6b5ea137f 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -30,6 +30,8 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config + echo "public_baseurl: http://localhost:$port/" >> $DIR/etc/$port.config + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't From 73d93039ff6c3addd54bb29a57808a3f2eed7a05 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 2 Oct 2020 12:29:29 +0100 Subject: [PATCH 095/134] Fix bug in remote thumbnail search (#8438) #7124 changed the behaviour of remote thumbnails so that the thumbnailing method was included in the filename of the thumbnail. To support existing files, it included a fallback so that we would check the old filename if the new filename didn't exist. Unfortunately, it didn't apply this logic to storage providers, so any thumbnails stored on such a storage provider was broken. --- changelog.d/8438.bugfix | 1 + synapse/rest/media/v1/media_storage.py | 43 ++++++++++++++------------ 2 files changed, 24 insertions(+), 20 deletions(-) create mode 100644 changelog.d/8438.bugfix diff --git a/changelog.d/8438.bugfix b/changelog.d/8438.bugfix new file mode 100644 index 0000000000..3edc394149 --- /dev/null +++ b/changelog.d/8438.bugfix @@ -0,0 +1 @@ +Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 5681677fc9..a9586fb0b7 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -141,31 +141,34 @@ class MediaStorage: Returns: Returns a Responder if the file was found, otherwise None. 
""" + paths = [self._file_info_to_path(file_info)] - path = self._file_info_to_path(file_info) - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - return FileResponder(open(local_path, "rb")) - - # Fallback for paths without method names - # Should be removed in the future + # fallback for remote thumbnails with no method in the filename if file_info.thumbnail and file_info.server_name: - legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail_width, - height=file_info.thumbnail_height, - content_type=file_info.thumbnail_type, + paths.append( + self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail_width, + height=file_info.thumbnail_height, + content_type=file_info.thumbnail_type, + ) ) - legacy_local_path = os.path.join(self.local_media_directory, legacy_path) - if os.path.exists(legacy_local_path): - return FileResponder(open(legacy_local_path, "rb")) + + for path in paths: + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + logger.debug("responding with local file %s", local_path) + return FileResponder(open(local_path, "rb")) + logger.debug("local file %s did not exist", local_path) for provider in self.storage_providers: - res = await provider.fetch(path, file_info) # type: Any - if res: - logger.debug("Streaming %s from %s", path, provider) - return res + for path in paths: + res = await provider.fetch(path, file_info) # type: Any + if res: + logger.debug("Streaming %s from %s", path, provider) + return res + logger.debug("%s not found on %s", path, provider) return None From f6c526ce6732a1af1228a08513f6a795b61c2b71 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:46:58 +0100 Subject: [PATCH 096/134] 1.21.0rc2 --- CHANGES.md | 13 +++++++++++++ changelog.d/8438.bugfix | 1 - changelog.d/8440.bugfix | 1 - changelog.d/8442.bugfix | 1 - changelog.d/8444.bugfix | 1 - changelog.d/8447.bugfix | 1 - synapse/__init__.py | 2 +- 7 files changed, 14 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/8438.bugfix delete mode 100644 changelog.d/8440.bugfix delete mode 100644 changelog.d/8442.bugfix delete mode 100644 changelog.d/8444.bugfix delete mode 100644 changelog.d/8447.bugfix diff --git a/CHANGES.md b/CHANGES.md index 29711c60ce..e5177e714d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,16 @@ +Synapse 1.21.0rc2 (2020-10-02) +============================== + +Bugfixes +-------- + +- Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) +- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) +- Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) +- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) +- Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
([\#8447](https://github.com/matrix-org/synapse/issues/8447)) + + Synapse 1.21.0rc1 (2020-10-01) ============================== diff --git a/changelog.d/8438.bugfix b/changelog.d/8438.bugfix deleted file mode 100644 index 3edc394149..0000000000 --- a/changelog.d/8438.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. diff --git a/changelog.d/8440.bugfix b/changelog.d/8440.bugfix deleted file mode 100644 index 84d5f541d1..0000000000 --- a/changelog.d/8440.bugfix +++ /dev/null @@ -1 +0,0 @@ -Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. diff --git a/changelog.d/8442.bugfix b/changelog.d/8442.bugfix deleted file mode 100644 index 6f779a1de5..0000000000 --- a/changelog.d/8442.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix malformed log line in new federation "catch up" logic. diff --git a/changelog.d/8444.bugfix b/changelog.d/8444.bugfix deleted file mode 100644 index 30c4328d4b..0000000000 --- a/changelog.d/8444.bugfix +++ /dev/null @@ -1 +0,0 @@ -Convert additional templates from inline HTML to Jinja2 templates. diff --git a/changelog.d/8447.bugfix b/changelog.d/8447.bugfix deleted file mode 100644 index 88edaf322e..0000000000 --- a/changelog.d/8447.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. diff --git a/synapse/__init__.py b/synapse/__init__.py index 4706974508..500558bbdf 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.21.0rc1" +__version__ = "1.21.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 6a8fd03acbce30c5f30f0225f21063e58f52eb37 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:48:33 +0100 Subject: [PATCH 097/134] 1.21.0rc2 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e5177e714d..e9872ff052 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Bugfixes -------- - Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) -- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) +- Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) - Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) - Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. 
([\#8447](https://github.com/matrix-org/synapse/issues/8447)) From 8672642225c9415935345057411bc7da732cb16a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:54:53 +0100 Subject: [PATCH 098/134] linkify changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e9872ff052..0437e420bc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,7 +8,7 @@ Bugfixes - Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) - Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) -- Fix DB query on startup for negative streams which caused long start up times. Introduced in #8374. ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) +- Fix DB query on startup for negative streams which caused long start up times. Introduced in [\#8374](https://github.com/matrix-org/synapse/issues/8374). ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) Synapse 1.21.0rc1 (2020-10-01) From 9de6e9e249d7d2940e847b68fe9995154b1a3f74 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 12:56:40 +0100 Subject: [PATCH 099/134] move #8444 to 'feature' --- CHANGES.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 0437e420bc..5d4e80499e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,13 +1,17 @@ Synapse 1.21.0rc2 (2020-10-02) ============================== +Features +-------- + +- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) + Bugfixes -------- - Fix a regression in v1.21.0rc1 which broke thumbnails of remote media. ([\#8438](https://github.com/matrix-org/synapse/issues/8438)) - Do not expose the experimental `uk.half-shot.msc2778.login.application_service` flow in the login API, which caused a compatibility problem with Element iOS. ([\#8440](https://github.com/matrix-org/synapse/issues/8440)) - Fix malformed log line in new federation "catch up" logic. ([\#8442](https://github.com/matrix-org/synapse/issues/8442)) -- Convert additional templates from inline HTML to Jinja2 templates. ([\#8444](https://github.com/matrix-org/synapse/issues/8444)) - Fix DB query on startup for negative streams which caused long start up times. Introduced in [\#8374](https://github.com/matrix-org/synapse/issues/8374). ([\#8447](https://github.com/matrix-org/synapse/issues/8447)) From 62894673e69f7beb0d0a748ad01c2e95c5fed106 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 2 Oct 2020 08:23:15 -0400 Subject: [PATCH 100/134] Allow background tasks to be run on a separate worker. 
(#8369) --- changelog.d/8369.feature | 1 + docs/sample_config.yaml | 5 + docs/workers.md | 17 ++ synapse/app/_base.py | 6 + synapse/app/admin_cmd.py | 1 + synapse/app/generic_worker.py | 4 + synapse/app/homeserver.py | 182 ---------------- synapse/app/phone_stats_home.py | 202 ++++++++++++++++++ synapse/config/workers.py | 18 ++ synapse/handlers/auth.py | 2 +- synapse/handlers/stats.py | 2 +- synapse/server.py | 17 +- synapse/storage/databases/main/__init__.py | 191 ----------------- synapse/storage/databases/main/metrics.py | 195 +++++++++++++++++ .../databases/main/monthly_active_users.py | 109 +++++----- synapse/storage/databases/main/room.py | 24 +-- synapse/storage/databases/main/ui_auth.py | 6 +- tests/test_phone_home.py | 2 +- tests/utils.py | 2 +- 19 files changed, 537 insertions(+), 449 deletions(-) create mode 100644 changelog.d/8369.feature create mode 100644 synapse/app/phone_stats_home.py diff --git a/changelog.d/8369.feature b/changelog.d/8369.feature new file mode 100644 index 0000000000..542993110b --- /dev/null +++ b/changelog.d/8369.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b2c1d7a737..7126ade2de 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2504,6 +2504,11 @@ opentracing: # events: worker1 # typing: worker1 +# The worker that is used to run background tasks (e.g. cleaning up expired +# data). If not provided this defaults to the main process. +# +#run_background_tasks_on: worker1 + # Configuration for Redis when using workers. This *must* be enabled when # using workers (unless using old style direct TCP configuration). diff --git a/docs/workers.md b/docs/workers.md index ad4d8ca9f2..84a9759e34 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -319,6 +319,23 @@ stream_writers: events: event_persister1 ``` +#### Background tasks + +There is also *experimental* support for moving background tasks to a separate +worker. Background tasks are run periodically or started via replication. Exactly +which tasks are configured to run depends on your Synapse configuration (e.g. if +stats is enabled). + +To enable this, the worker must have a `worker_name` and can be configured to run +background tasks. For example, to move background tasks to a dedicated worker, +the shared configuration would include: + +```yaml +run_background_tasks_on: background_worker +``` + +You might also wish to investigate the `update_user_directory` and +`media_instance_running_background_jobs` settings. ### `synapse.app.pusher` diff --git a/synapse/app/_base.py b/synapse/app/_base.py index fb476ddaf5..8bb0b142ca 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -28,6 +28,7 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse from synapse.app import check_bind_error +from synapse.app.phone_stats_home import start_phone_stats_home from synapse.config.server import ListenerConfig from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext @@ -274,6 +275,11 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): setup_sentry(hs) setup_sdnotify(hs) + # If background tasks are running on the main process, start collecting the + # phone home stats. + if hs.config.run_background_tasks: + start_phone_stats_home(hs) + # We now freeze all allocated objects in the hopes that (almost) # everything currently allocated are things that will be used for the # rest of time. 
Doing so means less work each GC (hopefully). diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 7d309b1bb0..f0d65d08d7 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -208,6 +208,7 @@ def start(config_options): # Explicitly disable background processes config.update_user_directory = False + config.run_background_tasks = False config.start_pushers = False config.send_federation = False diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index c38413c893..fc5188ce95 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -128,11 +128,13 @@ from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore +from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore +from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt @@ -454,6 +456,7 @@ class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. UserDirectoryStore, + StatsStore, UIAuthWorkerStore, SlavedDeviceInboxStore, SlavedDeviceStore, @@ -476,6 +479,7 @@ class GenericWorkerSlavedStore( SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, + ServerMetricsStore, SearchWorkerStore, BaseSlavedStore, ): diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index dff739e106..4ed4a2c253 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -17,14 +17,10 @@ import gc import logging -import math import os -import resource import sys from typing import Iterable -from prometheus_client import Gauge - from twisted.application import service from twisted.internet import defer, reactor from twisted.python.failure import Failure @@ -60,7 +56,6 @@ from synapse.http.server import ( from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource @@ -334,20 +329,6 @@ class SynapseHomeServer(HomeServer): logger.warning("Unrecognized listener type: %s", listener.type) -# Gauges to expose monthly active user control metrics -current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") -current_mau_by_service_gauge = Gauge( - "synapse_admin_mau_current_mau_by_service", - "Current MAU by service", - ["app_service"], -) -max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") -registered_reserved_users_mau_gauge = Gauge( - "synapse_admin_mau:registered_reserved_users", - "Registered users with reserved threepids", -) - - def setup(config_options): """ Args: @@ -389,8 +370,6 @@ def setup(config_options): except UpgradeDatabaseException as e: 
quit_with_error("Failed to upgrade database: %s" % (e,)) - hs.setup_master() - async def do_acme() -> bool: """ Reprovision an ACME certificate, if it's required. @@ -486,92 +465,6 @@ class SynapseService(service.Service): return self._port.stopListening() -# Contains the list of processes we will be monitoring -# currently either 0 or 1 -_stats_process = [] - - -async def phone_stats_home(hs, stats, stats_process=_stats_process): - logger.info("Gathering stats for reporting") - now = int(hs.get_clock().time()) - uptime = int(now - hs.start_time) - if uptime < 0: - uptime = 0 - - # - # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. - # - old = stats_process[0] - new = (now, resource.getrusage(resource.RUSAGE_SELF)) - stats_process[0] = new - - # Get RSS in bytes - stats["memory_rss"] = new[1].ru_maxrss - - # Get CPU time in % of a single core, not % of all cores - used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( - old[1].ru_utime + old[1].ru_stime - ) - if used_cpu_time == 0 or new[0] == old[0]: - stats["cpu_average"] = 0 - else: - stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) - - # - # General statistics - # - - stats["homeserver"] = hs.config.server_name - stats["server_context"] = hs.config.server_context - stats["timestamp"] = now - stats["uptime_seconds"] = uptime - version = sys.version_info - stats["python_version"] = "{}.{}.{}".format( - version.major, version.minor, version.micro - ) - stats["total_users"] = await hs.get_datastore().count_all_users() - - total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() - stats["total_nonbridged_users"] = total_nonbridged_users - - daily_user_type_results = await hs.get_datastore().count_daily_user_type() - for name, count in daily_user_type_results.items(): - stats["daily_user_type_" + name] = count - - room_count = await hs.get_datastore().get_room_count() - stats["total_room_count"] = room_count - - stats["daily_active_users"] = await hs.get_datastore().count_daily_users() - stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() - stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() - stats["daily_messages"] = await hs.get_datastore().count_daily_messages() - - r30_results = await hs.get_datastore().count_r30_users() - for name, count in r30_results.items(): - stats["r30_users_" + name] = count - - daily_sent_messages = await hs.get_datastore().count_daily_sent_messages() - stats["daily_sent_messages"] = daily_sent_messages - stats["cache_factor"] = hs.config.caches.global_factor - stats["event_cache_size"] = hs.config.caches.event_cache_size - - # - # Database version - # - - # This only reports info about the *main* database. 
- stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ - stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version - - logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) - try: - await hs.get_proxied_http_client().put_json( - hs.config.report_stats_endpoint, stats - ) - except Exception as e: - logger.warning("Error reporting stats: %s", e) - - def run(hs): PROFILE_SYNAPSE = False if PROFILE_SYNAPSE: @@ -597,81 +490,6 @@ def run(hs): ThreadPool._worker = profile(ThreadPool._worker) reactor.run = profile(reactor.run) - clock = hs.get_clock() - - stats = {} - - def performance_stats_init(): - _stats_process.clear() - _stats_process.append( - (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) - ) - - def start_phone_stats_home(): - return run_as_background_process( - "phone_stats_home", phone_stats_home, hs, stats - ) - - def generate_user_daily_visit_stats(): - return run_as_background_process( - "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits - ) - - # Rather than update on per session basis, batch up the requests. - # If you increase the loop period, the accuracy of user_daily_visits - # table will decrease - clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) - - # monthly active user limiting functionality - def reap_monthly_active_users(): - return run_as_background_process( - "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users - ) - - clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) - reap_monthly_active_users() - - async def generate_monthly_active_users(): - current_mau_count = 0 - current_mau_count_by_service = {} - reserved_users = () - store = hs.get_datastore() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - current_mau_count = await store.get_monthly_active_count() - current_mau_count_by_service = ( - await store.get_monthly_active_count_by_service() - ) - reserved_users = await store.get_registered_reserved_users() - current_mau_gauge.set(float(current_mau_count)) - - for app_service, count in current_mau_count_by_service.items(): - current_mau_by_service_gauge.labels(app_service).set(float(count)) - - registered_reserved_users_mau_gauge.set(float(len(reserved_users))) - max_mau_gauge.set(float(hs.config.max_mau_value)) - - def start_generate_monthly_active_users(): - return run_as_background_process( - "generate_monthly_active_users", generate_monthly_active_users - ) - - start_generate_monthly_active_users() - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) - # End of monthly active user settings - - if hs.config.report_stats: - logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) - - # We need to defer this init for the cases that we daemonize - # otherwise the process ID we get is that of the non-daemon process - clock.call_later(0, performance_stats_init) - - # We wait 5 minutes to send the first set of stats as the server can - # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home) - _base.start_reactor( "synapse-homeserver", soft_file_limit=hs.config.soft_file_limit, diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py new file mode 100644 index 0000000000..2c8e14a8c0 --- /dev/null +++ b/synapse/app/phone_stats_home.py @@ -0,0 +1,202 @@ +# Copyright 2020 The 
Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import math +import resource +import sys + +from prometheus_client import Gauge + +from synapse.metrics.background_process_metrics import run_as_background_process + +logger = logging.getLogger("synapse.app.homeserver") + +# Contains the list of processes we will be monitoring +# currently either 0 or 1 +_stats_process = [] + +# Gauges to expose monthly active user control metrics +current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU") +current_mau_by_service_gauge = Gauge( + "synapse_admin_mau_current_mau_by_service", + "Current MAU by service", + ["app_service"], +) +max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit") +registered_reserved_users_mau_gauge = Gauge( + "synapse_admin_mau:registered_reserved_users", + "Registered users with reserved threepids", +) + + +async def phone_stats_home(hs, stats, stats_process=_stats_process): + logger.info("Gathering stats for reporting") + now = int(hs.get_clock().time()) + uptime = int(now - hs.start_time) + if uptime < 0: + uptime = 0 + + # + # Performance statistics. Keep this early in the function to maintain reliability of `test_performance_100` test. + # + old = stats_process[0] + new = (now, resource.getrusage(resource.RUSAGE_SELF)) + stats_process[0] = new + + # Get RSS in bytes + stats["memory_rss"] = new[1].ru_maxrss + + # Get CPU time in % of a single core, not % of all cores + used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - ( + old[1].ru_utime + old[1].ru_stime + ) + if used_cpu_time == 0 or new[0] == old[0]: + stats["cpu_average"] = 0 + else: + stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100) + + # + # General statistics + # + + stats["homeserver"] = hs.config.server_name + stats["server_context"] = hs.config.server_context + stats["timestamp"] = now + stats["uptime_seconds"] = uptime + version = sys.version_info + stats["python_version"] = "{}.{}.{}".format( + version.major, version.minor, version.micro + ) + stats["total_users"] = await hs.get_datastore().count_all_users() + + total_nonbridged_users = await hs.get_datastore().count_nonbridged_users() + stats["total_nonbridged_users"] = total_nonbridged_users + + daily_user_type_results = await hs.get_datastore().count_daily_user_type() + for name, count in daily_user_type_results.items(): + stats["daily_user_type_" + name] = count + + room_count = await hs.get_datastore().get_room_count() + stats["total_room_count"] = room_count + + stats["daily_active_users"] = await hs.get_datastore().count_daily_users() + stats["monthly_active_users"] = await hs.get_datastore().count_monthly_users() + stats["daily_active_rooms"] = await hs.get_datastore().count_daily_active_rooms() + stats["daily_messages"] = await hs.get_datastore().count_daily_messages() + + r30_results = await hs.get_datastore().count_r30_users() + for name, count in r30_results.items(): + stats["r30_users_" + name] = count + + daily_sent_messages = await 
hs.get_datastore().count_daily_sent_messages() + stats["daily_sent_messages"] = daily_sent_messages + stats["cache_factor"] = hs.config.caches.global_factor + stats["event_cache_size"] = hs.config.caches.event_cache_size + + # + # Database version + # + + # This only reports info about the *main* database. + stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ + stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version + + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) + try: + await hs.get_proxied_http_client().put_json( + hs.config.report_stats_endpoint, stats + ) + except Exception as e: + logger.warning("Error reporting stats: %s", e) + + +def start_phone_stats_home(hs): + """ + Start the background tasks which report phone home stats. + """ + clock = hs.get_clock() + + stats = {} + + def performance_stats_init(): + _stats_process.clear() + _stats_process.append( + (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) + ) + + def start_phone_stats_home(): + return run_as_background_process( + "phone_stats_home", phone_stats_home, hs, stats + ) + + def generate_user_daily_visit_stats(): + return run_as_background_process( + "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits + ) + + # Rather than update on per session basis, batch up the requests. + # If you increase the loop period, the accuracy of user_daily_visits + # table will decrease + clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) + + # monthly active user limiting functionality + def reap_monthly_active_users(): + return run_as_background_process( + "reap_monthly_active_users", hs.get_datastore().reap_monthly_active_users + ) + + clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) + reap_monthly_active_users() + + async def generate_monthly_active_users(): + current_mau_count = 0 + current_mau_count_by_service = {} + reserved_users = () + store = hs.get_datastore() + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + current_mau_count = await store.get_monthly_active_count() + current_mau_count_by_service = ( + await store.get_monthly_active_count_by_service() + ) + reserved_users = await store.get_registered_reserved_users() + current_mau_gauge.set(float(current_mau_count)) + + for app_service, count in current_mau_count_by_service.items(): + current_mau_by_service_gauge.labels(app_service).set(float(count)) + + registered_reserved_users_mau_gauge.set(float(len(reserved_users))) + max_mau_gauge.set(float(hs.config.max_mau_value)) + + def start_generate_monthly_active_users(): + return run_as_background_process( + "generate_monthly_active_users", generate_monthly_active_users + ) + + if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: + start_generate_monthly_active_users() + clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) + # End of monthly active user settings + + if hs.config.report_stats: + logger.info("Scheduling stats reporting for 3 hour intervals") + clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) + + # We need to defer this init for the cases that we daemonize + # otherwise the process ID we get is that of the non-daemon process + clock.call_later(0, performance_stats_init) + + # We wait 5 minutes to send the first set of stats as the server can + # be quite busy the first few minutes + clock.call_later(5 * 60, start_phone_stats_home) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 
f23e42cdf9..57ab097eba 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -132,6 +132,19 @@ class WorkerConfig(Config): self.events_shard_config = ShardedWorkerHandlingConfig(self.writers.events) + # Whether this worker should run background tasks or not. + # + # As a note for developers, the background tasks guarded by this should + # be able to run on only a single instance (meaning that they don't + # depend on any in-memory state of a particular worker). + # + # No effort is made to ensure only a single instance of these tasks is + # running. + background_tasks_instance = config.get("run_background_tasks_on") or "master" + self.run_background_tasks = ( + self.worker_name is None and background_tasks_instance == "master" + ) or self.worker_name == background_tasks_instance + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Workers ## @@ -167,6 +180,11 @@ class WorkerConfig(Config): #stream_writers: # events: worker1 # typing: worker1 + + # The worker that is used to run background tasks (e.g. cleaning up expired + # data). If not provided this defaults to the main process. + # + #run_background_tasks_on: worker1 """ def read_arguments(self, args): diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 00eae92052..7c4b716b28 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -212,7 +212,7 @@ class AuthHandler(BaseHandler): self._clock = self.hs.get_clock() # Expire old UI auth sessions after a period of time. - if hs.config.worker_app is None: + if hs.config.run_background_tasks: self._clock.looping_call( run_as_background_process, 5 * 60 * 1000, diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 249ffe2a55..dc62b21c06 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -49,7 +49,7 @@ class StatsHandler: # Guard to ensure we only process deltas one at a time self._is_processing = False - if hs.config.stats_enabled: + if self.stats_enabled and hs.config.run_background_tasks: self.notifier.add_replication_callback(self.notify_new_event) # We kick this off so that we don't have to wait for a change before diff --git a/synapse/server.py b/synapse/server.py index 5e3752c333..aa2273955c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -185,7 +185,10 @@ class HomeServer(metaclass=abc.ABCMeta): we are listening on to provide HTTP services. """ - REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"] + REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "auth", + "stats", + ] # This is overridden in derived application classes # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be @@ -251,14 +254,20 @@ class HomeServer(metaclass=abc.ABCMeta): self.datastores = Databases(self.DATASTORE_CLASS, self) logger.info("Finished setting up.") - def setup_master(self) -> None: + # Register background tasks required by this server. This must be done + # somewhat manually due to the background tasks not being registered + # unless handlers are instantiated. + if self.config.run_background_tasks: + self.setup_background_tasks() + + def setup_background_tasks(self) -> None: """ Some handlers have side effects on instantiation (like registering background updates). This function causes them to be fetched, and therefore instantiated, to run those side effects. 
""" - for i in self.REQUIRED_ON_MASTER_STARTUP: - getattr(self, "get_" + i)() + for i in self.REQUIRED_ON_BACKGROUND_TASK_STARTUP: + getattr(self, "get_" + i + "_handler")() def get_reactor(self) -> twisted.internet.base.ReactorBase: """ diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0cb12f4c61..f823d66709 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -15,9 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import calendar import logging -import time from typing import Any, Dict, List, Optional, Tuple from synapse.api.constants import PresenceState @@ -268,9 +266,6 @@ class DataStore( self._stream_order_on_start = self.get_room_max_stream_ordering() self._min_stream_order_on_start = self.get_room_min_stream_ordering() - # Used in _generate_user_daily_visits to keep track of progress - self._last_user_visit_update = self._get_start_of_day() - def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() @@ -301,192 +296,6 @@ class DataStore( return [UserPresenceState(**row) for row in rows] - async def count_daily_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 24 hours. - """ - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) - return await self.db_pool.runInteraction( - "count_daily_users", self._count_users, yesterday - ) - - async def count_monthly_users(self) -> int: - """ - Counts the number of users who used this homeserver in the last 30 days. - Note this method is intended for phonehome metrics only and is different - from the mau figure in synapse.storage.monthly_active_users which, - amongst other things, includes a 3 day grace period before a user counts. - """ - thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) - return await self.db_pool.runInteraction( - "count_monthly_users", self._count_users, thirty_days_ago - ) - - def _count_users(self, txn, time_from): - """ - Returns number of users seen in the past time_from period - """ - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT user_id FROM user_ips - WHERE last_seen > ? - GROUP BY user_id - ) u - """ - txn.execute(sql, (time_from,)) - (count,) = txn.fetchone() - return count - - async def count_r30_users(self) -> Dict[str, int]: - """ - Counts the number of 30 day retained users, defined as:- - * Users who have created their accounts more than 30 days ago - * Where last seen at most 30 days ago - * Where account creation and last_seen are > 30 days apart - - Returns: - A mapping of counts globally as well as broken out by platform. - """ - - def _count_r30_users(txn): - thirty_days_in_secs = 86400 * 30 - now = int(self._clock.time()) - thirty_days_ago_in_secs = now - thirty_days_in_secs - - sql = """ - SELECT platform, COALESCE(count(*), 0) FROM ( - SELECT - users.name, platform, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen, - CASE - WHEN user_agent LIKE '%%Android%%' THEN 'android' - WHEN user_agent LIKE '%%iOS%%' THEN 'ios' - WHEN user_agent LIKE '%%Electron%%' THEN 'electron' - WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' - WHEN user_agent LIKE '%%Gecko%%' THEN 'web' - ELSE 'unknown' - END - AS platform - FROM user_ips - ) uip - ON users.name = uip.user_id - AND users.appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? 
- AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, platform, users.creation_ts - ) u GROUP BY platform - """ - - results = {} - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - for row in txn: - if row[0] == "unknown": - pass - results[row[0]] = row[1] - - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT users.name, users.creation_ts * 1000, - MAX(uip.last_seen) - FROM users - INNER JOIN ( - SELECT - user_id, - last_seen - FROM user_ips - ) uip - ON users.name = uip.user_id - AND appservice_id is NULL - AND users.creation_ts < ? - AND uip.last_seen/1000 > ? - AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 - GROUP BY users.name, users.creation_ts - ) u - """ - - txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) - - (count,) = txn.fetchone() - results["all"] = count - - return results - - return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) - - def _get_start_of_day(self): - """ - Returns millisecond unixtime for start of UTC day. - """ - now = time.gmtime() - today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) - return today_start * 1000 - - async def generate_user_daily_visits(self) -> None: - """ - Generates daily visit data for use in cohort/ retention analysis - """ - - def _generate_user_daily_visits(txn): - logger.info("Calling _generate_user_daily_visits") - today_start = self._get_start_of_day() - a_day_in_milliseconds = 24 * 60 * 60 * 1000 - now = self.clock.time_msec() - - sql = """ - INSERT INTO user_daily_visits (user_id, device_id, timestamp) - SELECT u.user_id, u.device_id, ? - FROM user_ips AS u - LEFT JOIN ( - SELECT user_id, device_id, timestamp FROM user_daily_visits - WHERE timestamp = ? - ) udv - ON u.user_id = udv.user_id AND u.device_id=udv.device_id - INNER JOIN users ON users.name=u.user_id - WHERE last_seen > ? AND last_seen <= ? - AND udv.timestamp IS NULL AND users.is_guest=0 - AND users.appservice_id IS NULL - GROUP BY u.user_id, u.device_id - """ - - # This means that the day has rolled over but there could still - # be entries from the previous day. There is an edge case - # where if the user logs in at 23:59 and overwrites their - # last_seen at 00:01 then they will not be counted in the - # previous day's stats - it is important that the query is run - # often to minimise this case. - if today_start > self._last_user_visit_update: - yesterday_start = today_start - a_day_in_milliseconds - txn.execute( - sql, - ( - yesterday_start, - yesterday_start, - self._last_user_visit_update, - today_start, - ), - ) - self._last_user_visit_update = today_start - - txn.execute( - sql, (today_start, today_start, self._last_user_visit_update, now) - ) - # Update _last_user_visit_update to now. The reason to do this - # rather just clamping to the beginning of the day is to limit - # the size of the join - meaning that the query can be run more - # frequently - self._last_user_visit_update = now - - await self.db_pool.runInteraction( - "generate_user_daily_visits", _generate_user_daily_visits - ) - async def get_users(self) -> List[Dict[str, Any]]: """Function to retrieve a list of users in users table. diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 92099f95ce..2c5a4fdbf6 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -12,6 +12,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
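`generate_user_daily_visits`, removed here and re-added in `metrics.py` below, runs the same INSERT over up to two time windows. A hedged sketch of that rollover bookkeeping as a standalone function (all names invented, times in milliseconds):

```python
DAY_MS = 24 * 60 * 60 * 1000

def visit_windows(today_start: int, last_update: int, now: int):
    """Return the (timestamp, from, to) windows the visits INSERT must cover."""
    windows = []
    if today_start > last_update:
        # The day rolled over since the last run: back-fill the tail of
        # yesterday first, then advance the high-water mark.
        yesterday_start = today_start - DAY_MS
        windows.append((yesterday_start, last_update, today_start))
        last_update = today_start
    windows.append((today_start, last_update, now))
    return windows

# First run after midnight: two windows (yesterday's tail, then today so far).
assert len(visit_windows(1000 * DAY_MS, 1000 * DAY_MS - 5, 1000 * DAY_MS + 5)) == 2
```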
# See the License for the specific language governing permissions and # limitations under the License. +import calendar +import logging +import time +from typing import Dict from synapse.metrics import GaugeBucketCollector from synapse.metrics.background_process_metrics import run_as_background_process @@ -21,6 +25,8 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) +logger = logging.getLogger(__name__) + # Collect metrics on the number of forward extremities that exist. _extremities_collecter = GaugeBucketCollector( "synapse_forward_extremities", @@ -60,6 +66,9 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + # Used in _generate_user_daily_visits to keep track of progress + self._last_user_visit_update = self._get_start_of_day() + async def _read_forward_extremities(self): def fetch(txn): txn.execute( @@ -137,3 +146,189 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): return count return await self.db_pool.runInteraction("count_daily_active_rooms", _count) + + async def count_daily_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 24 hours. + """ + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + return await self.db_pool.runInteraction( + "count_daily_users", self._count_users, yesterday + ) + + async def count_monthly_users(self) -> int: + """ + Counts the number of users who used this homeserver in the last 30 days. + Note this method is intended for phonehome metrics only and is different + from the mau figure in synapse.storage.monthly_active_users which, + amongst other things, includes a 3 day grace period before a user counts. + """ + thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + return await self.db_pool.runInteraction( + "count_monthly_users", self._count_users, thirty_days_ago + ) + + def _count_users(self, txn, time_from): + """ + Returns number of users seen in the past time_from period + """ + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT user_id FROM user_ips + WHERE last_seen > ? + GROUP BY user_id + ) u + """ + txn.execute(sql, (time_from,)) + (count,) = txn.fetchone() + return count + + async def count_r30_users(self) -> Dict[str, int]: + """ + Counts the number of 30 day retained users, defined as:- + * Users who have created their accounts more than 30 days ago + * Where last seen at most 30 days ago + * Where account creation and last_seen are > 30 days apart + + Returns: + A mapping of counts globally as well as broken out by platform. + """ + + def _count_r30_users(txn): + thirty_days_in_secs = 86400 * 30 + now = int(self._clock.time()) + thirty_days_ago_in_secs = now - thirty_days_in_secs + + sql = """ + SELECT platform, COALESCE(count(*), 0) FROM ( + SELECT + users.name, platform, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen, + CASE + WHEN user_agent LIKE '%%Android%%' THEN 'android' + WHEN user_agent LIKE '%%iOS%%' THEN 'ios' + WHEN user_agent LIKE '%%Electron%%' THEN 'electron' + WHEN user_agent LIKE '%%Mozilla%%' THEN 'web' + WHEN user_agent LIKE '%%Gecko%%' THEN 'web' + ELSE 'unknown' + END + AS platform + FROM user_ips + ) uip + ON users.name = uip.user_id + AND users.appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? 
+ AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, platform, users.creation_ts + ) u GROUP BY platform + """ + + results = {} + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + for row in txn: + if row[0] == "unknown": + pass + results[row[0]] = row[1] + + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT users.name, users.creation_ts * 1000, + MAX(uip.last_seen) + FROM users + INNER JOIN ( + SELECT + user_id, + last_seen + FROM user_ips + ) uip + ON users.name = uip.user_id + AND appservice_id is NULL + AND users.creation_ts < ? + AND uip.last_seen/1000 > ? + AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30 + GROUP BY users.name, users.creation_ts + ) u + """ + + txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs)) + + (count,) = txn.fetchone() + results["all"] = count + + return results + + return await self.db_pool.runInteraction("count_r30_users", _count_r30_users) + + def _get_start_of_day(self): + """ + Returns millisecond unixtime for start of UTC day. + """ + now = time.gmtime() + today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) + return today_start * 1000 + + async def generate_user_daily_visits(self) -> None: + """ + Generates daily visit data for use in cohort/ retention analysis + """ + + def _generate_user_daily_visits(txn): + logger.info("Calling _generate_user_daily_visits") + today_start = self._get_start_of_day() + a_day_in_milliseconds = 24 * 60 * 60 * 1000 + now = self._clock.time_msec() + + sql = """ + INSERT INTO user_daily_visits (user_id, device_id, timestamp) + SELECT u.user_id, u.device_id, ? + FROM user_ips AS u + LEFT JOIN ( + SELECT user_id, device_id, timestamp FROM user_daily_visits + WHERE timestamp = ? + ) udv + ON u.user_id = udv.user_id AND u.device_id=udv.device_id + INNER JOIN users ON users.name=u.user_id + WHERE last_seen > ? AND last_seen <= ? + AND udv.timestamp IS NULL AND users.is_guest=0 + AND users.appservice_id IS NULL + GROUP BY u.user_id, u.device_id + """ + + # This means that the day has rolled over but there could still + # be entries from the previous day. There is an edge case + # where if the user logs in at 23:59 and overwrites their + # last_seen at 00:01 then they will not be counted in the + # previous day's stats - it is important that the query is run + # often to minimise this case. + if today_start > self._last_user_visit_update: + yesterday_start = today_start - a_day_in_milliseconds + txn.execute( + sql, + ( + yesterday_start, + yesterday_start, + self._last_user_visit_update, + today_start, + ), + ) + self._last_user_visit_update = today_start + + txn.execute( + sql, (today_start, today_start, self._last_user_visit_update, now) + ) + # Update _last_user_visit_update to now. 
The reason to do this + # rather just clamping to the beginning of the day is to limit + # the size of the join - meaning that the query can be run more + # frequently + self._last_user_visit_update = now + + await self.db_pool.runInteraction( + "generate_user_daily_visits", _generate_user_daily_visits + ) diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index e93aad33cd..b2127598ef 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -32,6 +32,9 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): self._clock = hs.get_clock() self.hs = hs + self._limit_usage_by_mau = hs.config.limit_usage_by_mau + self._max_mau_value = hs.config.max_mau_value + @cached(num_args=0) async def get_monthly_active_count(self) -> int: """Generates current count of monthly active users @@ -124,60 +127,6 @@ class MonthlyActiveUsersWorkerStore(SQLBaseStore): desc="user_last_seen_monthly_active", ) - -class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - - self._limit_usage_by_mau = hs.config.limit_usage_by_mau - self._mau_stats_only = hs.config.mau_stats_only - self._max_mau_value = hs.config.max_mau_value - - # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( - self.db_pool.new_transaction( - db_conn, - "initialise_mau_threepids", - [], - [], - self._initialise_reserved_users, - hs.config.mau_limits_reserved_threepids[: self._max_mau_value], - ) - - def _initialise_reserved_users(self, txn, threepids): - """Ensures that reserved threepids are accounted for in the MAU table, should - be called on start up. - - Args: - txn (cursor): - threepids (list[dict]): List of threepid dicts to reserve - """ - - # XXX what is this function trying to achieve? It upserts into - # monthly_active_users for each *registered* reserved mau user, but why? - # - # - shouldn't there already be an entry for each reserved user (at least - # if they have been active recently)? - # - # - if it's important that the timestamp is kept up to date, why do we only - # run this at startup? - - for tp in threepids: - user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) - - if user_id: - is_support = self.is_support_user_txn(txn, user_id) - if not is_support: - # We do this manually here to avoid hitting #6791 - self.db_pool.simple_upsert_txn( - txn, - table="monthly_active_users", - keyvalues={"user_id": user_id}, - values={"timestamp": int(self._clock.time_msec())}, - ) - else: - logger.warning("mau limit reserved threepid %s not found in db" % tp) - async def reap_monthly_active_users(self): """Cleans out monthly active user table to ensure that no stale entries exist. 
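The "do not add more reserved users" guard above is a plain list slice: however many reserved threepids are configured, at most `max_mau_value` of them reach `_initialise_reserved_users`. A self-contained illustration with invented config values:

```python
reserved_threepids = [
    {"medium": "email", "address": "a@example.org"},
    {"medium": "email", "address": "b@example.org"},
    {"medium": "email", "address": "c@example.org"},
]
max_mau_value = 2

# Mirrors mau_limits_reserved_threepids[: self._max_mau_value] above.
capped = reserved_threepids[:max_mau_value]
assert len(capped) == 2
```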
@@ -257,6 +206,58 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): "reap_monthly_active_users", _reap_users, reserved_users ) + +class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + self._mau_stats_only = hs.config.mau_stats_only + + # Do not add more reserved users than the total allowable number + # cur = LoggingTransaction( + self.db_pool.new_transaction( + db_conn, + "initialise_mau_threepids", + [], + [], + self._initialise_reserved_users, + hs.config.mau_limits_reserved_threepids[: self._max_mau_value], + ) + + def _initialise_reserved_users(self, txn, threepids): + """Ensures that reserved threepids are accounted for in the MAU table, should + be called on start up. + + Args: + txn (cursor): + threepids (list[dict]): List of threepid dicts to reserve + """ + + # XXX what is this function trying to achieve? It upserts into + # monthly_active_users for each *registered* reserved mau user, but why? + # + # - shouldn't there already be an entry for each reserved user (at least + # if they have been active recently)? + # + # - if it's important that the timestamp is kept up to date, why do we only + # run this at startup? + + for tp in threepids: + user_id = self.get_user_id_by_threepid_txn(txn, tp["medium"], tp["address"]) + + if user_id: + is_support = self.is_support_user_txn(txn, user_id) + if not is_support: + # We do this manually here to avoid hitting #6791 + self.db_pool.simple_upsert_txn( + txn, + table="monthly_active_users", + keyvalues={"user_id": user_id}, + values={"timestamp": int(self._clock.time_msec())}, + ) + else: + logger.warning("mau limit reserved threepid %s not found in db" % tp) + async def upsert_monthly_active_user(self, user_id: str) -> None: """Updates or inserts the user into the monthly active user table, which is used to track the current MAU usage of the server diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3c7630857f..c0f2af0785 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -192,6 +192,18 @@ class RoomWorkerStore(SQLBaseStore): "count_public_rooms", _count_public_rooms_txn ) + async def get_room_count(self) -> int: + """Retrieve the total number of rooms. + """ + + def f(txn): + sql = "SELECT count(*) FROM rooms" + txn.execute(sql) + row = txn.fetchone() + return row[0] or 0 + + return await self.db_pool.runInteraction("get_rooms", f) + async def get_largest_public_rooms( self, network_tuple: Optional[ThirdPartyInstanceID], @@ -1292,18 +1304,6 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore, SearchStore): ) self.hs.get_notifier().on_new_replication_data() - async def get_room_count(self) -> int: - """Retrieve the total number of rooms. 
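`get_room_count`, which this hunk moves onto `RoomWorkerStore` so that workers can serve it, is a single aggregate query. The same pattern against plain `sqlite3`, purely illustrative; Synapse actually runs `f` via `db_pool.runInteraction`:

```python
import sqlite3

def get_room_count(txn) -> int:
    txn.execute("SELECT count(*) FROM rooms")
    row = txn.fetchone()
    return row[0] or 0  # treat a missing count as zero

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE rooms (room_id TEXT)")
assert get_room_count(conn.cursor()) == 0
conn.execute("INSERT INTO rooms VALUES ('!a:example.org')")
assert get_room_count(conn.cursor()) == 1
```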
- """ - - def f(txn): - sql = "SELECT count(*) FROM rooms" - txn.execute(sql) - row = txn.fetchone() - return row[0] or 0 - - return await self.db_pool.runInteraction("get_rooms", f) - async def add_event_report( self, room_id: str, diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 3b9211a6d2..79b7ece330 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -288,8 +288,6 @@ class UIAuthWorkerStore(SQLBaseStore): ) return [(row["user_agent"], row["ip"]) for row in rows] - -class UIAuthStore(UIAuthWorkerStore): async def delete_old_ui_auth_sessions(self, expiration_time: int) -> None: """ Remove sessions which were last used earlier than the expiration time. @@ -339,3 +337,7 @@ class UIAuthStore(UIAuthWorkerStore): iterable=session_ids, keyvalues={}, ) + + +class UIAuthStore(UIAuthWorkerStore): + pass diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index 7657bddea5..e7aed092c2 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -17,7 +17,7 @@ import resource import mock -from synapse.app.homeserver import phone_stats_home +from synapse.app.phone_stats_home import phone_stats_home from tests.unittest import HomeserverTestCase diff --git a/tests/utils.py b/tests/utils.py index 4673872f88..7a927c7f74 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -276,7 +276,7 @@ def setup_test_homeserver( hs.setup() if homeserverToUse.__name__ == "TestHomeServer": - hs.setup_master() + hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): database = hs.get_datastores().databases[0] From ec10bdd32bb52af73789f5f60b39135578a739b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:09:31 +0100 Subject: [PATCH 101/134] Speed up unit tests when using PostgreSQL (#8450) --- changelog.d/8450.misc | 1 + synapse/storage/databases/main/events_worker.py | 13 ++++++++++++- tests/server.py | 4 ++++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8450.misc diff --git a/changelog.d/8450.misc b/changelog.d/8450.misc new file mode 100644 index 0000000000..4e04c523ab --- /dev/null +++ b/changelog.d/8450.misc @@ -0,0 +1 @@ +Speed up unit tests when using PostgreSQL. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f95679ebc4..723ced4ff0 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -74,6 +74,13 @@ class EventRedactBehaviour(Names): class EventsWorkerStore(SQLBaseStore): + # Whether to use dedicated DB threads for event fetching. This is only used + # if there are multiple DB threads available. When used will lock the DB + # thread for periods of time (so unit tests want to disable this when they + # run DB transactions on the main thread). See EVENT_QUEUE_* for more + # options controlling this. 
+ USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True + def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -522,7 +529,11 @@ class EventsWorkerStore(SQLBaseStore): if not event_list: single_threaded = self.database_engine.single_threaded - if single_threaded or i > EVENT_QUEUE_ITERATIONS: + if ( + not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING + or single_threaded + or i > EVENT_QUEUE_ITERATIONS + ): self._event_fetch_ongoing -= 1 return else: diff --git a/tests/server.py b/tests/server.py index b404ad4e2a..f7f5276b21 100644 --- a/tests/server.py +++ b/tests/server.py @@ -372,6 +372,10 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): pool.threadpool = ThreadPool(clock._reactor) pool.running = True + # We've just changed the Databases to run DB transactions on the same + # thread, so we need to disable the dedicated thread behaviour. + server.get_datastores().main.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = False + return server From e3debf9682ed59b2972f236fe2982b6af0a9bb9a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 2 Oct 2020 15:20:45 +0100 Subject: [PATCH 102/134] Add logging on startup/shutdown (#8448) This is so we can tell what is going on when things are taking a while to start up. The main change here is to ensure that transactions that are created during startup get correctly logged like normal transactions. --- changelog.d/8448.misc | 1 + scripts/synapse_port_db | 2 +- synapse/app/_base.py | 5 ++ synapse/storage/database.py | 89 +++++++++++++++---- synapse/storage/databases/__init__.py | 2 +- synapse/storage/databases/main/__init__.py | 1 - .../databases/main/event_push_actions.py | 8 +- .../databases/main/monthly_active_users.py | 1 - synapse/storage/databases/main/roommember.py | 13 +-- .../databases/main/schema/delta/20/pushers.py | 19 ++-- .../databases/main/schema/delta/25/fts.py | 2 - .../databases/main/schema/delta/27/ts.py | 2 - .../main/schema/delta/30/as_users.py | 6 +- .../databases/main/schema/delta/31/pushers.py | 19 ++-- .../main/schema/delta/31/search_update.py | 2 - .../main/schema/delta/33/event_fields.py | 2 - .../main/schema/delta/33/remote_media_ts.py | 5 +- .../delta/56/unique_user_filter_index.py | 7 +- .../delta/57/local_current_membership.py | 1 - synapse/storage/prepare_database.py | 33 +++---- synapse/storage/types.py | 6 ++ synapse/storage/util/id_generators.py | 8 +- synapse/storage/util/sequence.py | 15 +++- tests/storage/test_appservice.py | 14 +-- tests/utils.py | 2 + 25 files changed, 152 insertions(+), 113 deletions(-) create mode 100644 changelog.d/8448.misc diff --git a/changelog.d/8448.misc b/changelog.d/8448.misc new file mode 100644 index 0000000000..5ddda1803b --- /dev/null +++ b/changelog.d/8448.misc @@ -0,0 +1 @@ +Add SQL logging on queries that happen during startup. 
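The `USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING` switch from the previous patch works because it is a plain class attribute, which is what lets `tests/server.py` shadow it on a single instance. The mechanism in miniature, with invented names:

```python
class EventFetcher:
    USE_DEDICATED_DB_THREADS = True  # class-level default used in production

    def fetch_mode(self) -> str:
        return "dedicated-thread" if self.USE_DEDICATED_DB_THREADS else "inline"

fetcher = EventFetcher()
assert fetcher.fetch_mode() == "dedicated-thread"

# The test suite shadows the class default on one instance only, exactly as
# setup_test_homeserver does for the main datastore above.
fetcher.USE_DEDICATED_DB_THREADS = False
assert fetcher.fetch_mode() == "inline"
assert EventFetcher().fetch_mode() == "dedicated-thread"  # others unaffected
```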
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index ae2887b7d2..7e12f5440c 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -489,7 +489,7 @@ class Porter(object): hs = MockHomeserver(self.hs_config) - with make_conn(db_config, engine) as db_conn: + with make_conn(db_config, engine, "portdb") as db_conn: engine.check_database( db_conn, allow_outdated_version=allow_outdated_version ) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 8bb0b142ca..f6f7b2bf42 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -272,6 +272,11 @@ def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): hs.get_datastore().db_pool.start_profiling() hs.get_pusherpool().start() + # Log when we start the shut down process. + hs.get_reactor().addSystemEventTrigger( + "before", "shutdown", logger.info, "Shutting down..." + ) + setup_sentry(hs) setup_sdnotify(hs) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 79ec8f119d..0d9d9b7cc0 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -32,6 +32,7 @@ from typing import ( overload, ) +import attr from prometheus_client import Histogram from typing_extensions import Literal @@ -90,13 +91,17 @@ def make_pool( return adbapi.ConnectionPool( db_config.config["name"], cp_reactor=reactor, - cp_openfun=engine.on_new_connection, + cp_openfun=lambda conn: engine.on_new_connection( + LoggingDatabaseConnection(conn, engine, "on_new_connection") + ), **db_config.config.get("args", {}) ) def make_conn( - db_config: DatabaseConnectionConfig, engine: BaseDatabaseEngine + db_config: DatabaseConnectionConfig, + engine: BaseDatabaseEngine, + default_txn_name: str, ) -> Connection: """Make a new connection to the database and return it. @@ -109,11 +114,60 @@ def make_conn( for k, v in db_config.config.get("args", {}).items() if not k.startswith("cp_") } - db_conn = engine.module.connect(**db_params) + native_db_conn = engine.module.connect(**db_params) + db_conn = LoggingDatabaseConnection(native_db_conn, engine, default_txn_name) + engine.on_new_connection(db_conn) return db_conn +@attr.s(slots=True) +class LoggingDatabaseConnection: + """A wrapper around a database connection that returns `LoggingTransaction` + as its cursor class. + + This is mainly used on startup to ensure that queries get logged correctly + """ + + conn = attr.ib(type=Connection) + engine = attr.ib(type=BaseDatabaseEngine) + default_txn_name = attr.ib(type=str) + + def cursor( + self, *, txn_name=None, after_callbacks=None, exception_callbacks=None + ) -> "LoggingTransaction": + if not txn_name: + txn_name = self.default_txn_name + + return LoggingTransaction( + self.conn.cursor(), + name=txn_name, + database_engine=self.engine, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, + ) + + def close(self) -> None: + self.conn.close() + + def commit(self) -> None: + self.conn.commit() + + def rollback(self, *args, **kwargs) -> None: + self.conn.rollback(*args, **kwargs) + + def __enter__(self) -> "Connection": + self.conn.__enter__() + return self + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + return self.conn.__exit__(exc_type, exc_value, traceback) + + # Proxy through any unknown lookups to the DB conn class. + def __getattr__(self, name): + return getattr(self.conn, name) + + # The type of entry which goes on our after_callbacks and exception_callbacks lists. 
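`LoggingDatabaseConnection` above implements only the methods it needs to intercept and lets `__getattr__` forward everything else to the real DB-API connection. A stripped-down sketch of that delegation pattern, not the full class:

```python
import sqlite3

class ConnectionWrapper:
    def __init__(self, conn, default_txn_name: str):
        self._conn = conn
        self._default_txn_name = default_txn_name

    def cursor(self, *, txn_name=None):
        # Interception point: a real implementation would return a logging
        # cursor tagged with txn_name (or the default).
        return self._conn.cursor()

    def __getattr__(self, name):
        # commit(), rollback(), close(), ... fall through unchanged.
        return getattr(self._conn, name)

wrapped = ConnectionWrapper(sqlite3.connect(":memory:"), "startup")
wrapped.cursor().execute("SELECT 1")
wrapped.commit()  # proxied to the real connection via __getattr__
```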
# # Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so @@ -247,6 +301,12 @@ class LoggingTransaction: def close(self) -> None: self.txn.close() + def __enter__(self) -> "LoggingTransaction": + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + class PerformanceCounters: def __init__(self): @@ -395,7 +455,7 @@ class DatabasePool: def new_transaction( self, - conn: Connection, + conn: LoggingDatabaseConnection, desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], @@ -418,12 +478,10 @@ class DatabasePool: i = 0 N = 5 while True: - cursor = LoggingTransaction( - conn.cursor(), - name, - self.engine, - after_callbacks, - exception_callbacks, + cursor = conn.cursor( + txn_name=name, + after_callbacks=after_callbacks, + exception_callbacks=exception_callbacks, ) try: r = func(cursor, *args, **kwargs) @@ -584,7 +642,10 @@ class DatabasePool: logger.debug("Reconnecting closed database connection") conn.reconnect() - return func(conn, *args, **kwargs) + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) @@ -1621,7 +1682,7 @@ class DatabasePool: def get_cache_dict( self, - db_conn: Connection, + db_conn: LoggingDatabaseConnection, table: str, entity_column: str, stream_column: str, @@ -1642,9 +1703,7 @@ class DatabasePool: "limit": limit, } - sql = self.engine.convert_param_style(sql) - - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="get_cache_dict") txn.execute(sql, (int(max_value),)) cache = {row[0]: int(row[1]) for row in txn} diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index aa5d490624..0c24325011 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -46,7 +46,7 @@ class Databases: db_name = database_config.name engine = create_engine(database_config.config) - with make_conn(database_config, engine) as db_conn: + with make_conn(database_config, engine, "startup") as db_conn: logger.info("[database config %r]: Checking database server", db_name) engine.check_database(db_conn) diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index f823d66709..9b16f45f3e 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -284,7 +284,6 @@ class DataStore( " last_user_sync_ts, status_msg, currently_active FROM presence_stream" " WHERE state != ?" 
) - sql = self.database_engine.convert_param_style(sql) txn = db_conn.cursor() txn.execute(sql, (PresenceState.OFFLINE,)) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 62f1738732..80f3b4d740 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -20,7 +20,7 @@ from typing import Dict, List, Optional, Tuple, Union import attr from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import LoggingTransaction, SQLBaseStore, db_to_json +from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -74,11 +74,7 @@ class EventPushActionsWorkerStore(SQLBaseStore): self.stream_ordering_month_ago = None self.stream_ordering_day_ago = None - cur = LoggingTransaction( - db_conn.cursor(), - name="_find_stream_orderings_for_times_txn", - database_engine=self.database_engine, - ) + cur = db_conn.cursor(txn_name="_find_stream_orderings_for_times_txn") self._find_stream_orderings_for_times_txn(cur) cur.close() diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index b2127598ef..c66f558567 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -214,7 +214,6 @@ class MonthlyActiveUsersStore(MonthlyActiveUsersWorkerStore): self._mau_stats_only = hs.config.mau_stats_only # Do not add more reserved users than the total allowable number - # cur = LoggingTransaction( self.db_pool.new_transaction( db_conn, "initialise_mau_threepids", diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 86ffe2479e..bae1bd22d3 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -21,12 +21,7 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage._base import ( - LoggingTransaction, - SQLBaseStore, - db_to_json, - make_in_list_sql_clause, -) +from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import Sqlite3Engine @@ -60,10 +55,8 @@ class RoomMemberWorkerStore(EventsWorkerStore): # background update still running? 
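The `__enter__`/`__exit__` pair added to `LoggingTransaction` above lets startup code scope a cursor with `with` and have it closed on every exit path. A minimal runnable illustration of the same shape:

```python
import sqlite3

class Txn:
    def __init__(self, cursor):
        self.cursor = cursor

    def close(self) -> None:
        self.cursor.close()

    def __enter__(self) -> "Txn":
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

conn = sqlite3.connect(":memory:")
with Txn(conn.cursor()) as txn:
    txn.cursor.execute("SELECT 1")
# txn.cursor is closed here, even if the body had raised
```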
self._current_state_events_membership_up_to_date = False - txn = LoggingTransaction( - db_conn.cursor(), - name="_check_safe_current_state_events_membership_updated", - database_engine=self.database_engine, + txn = db_conn.cursor( + txn_name="_check_safe_current_state_events_membership_updated" ) self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/databases/main/schema/delta/20/pushers.py index 3edfcfd783..45b846e6a7 100644 --- a/synapse/storage/databases/main/schema/delta/20/pushers.py +++ b/synapse/storage/databases/main/schema/delta/20/pushers.py @@ -66,16 +66,15 @@ def run_create(cur, database_engine, *args, **kwargs): row[8] = bytes(row[8]).decode("utf-8") row[11] = bytes(row[11]).decode("utf-8") cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_token, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_token, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/databases/main/schema/delta/25/fts.py index ee675e71ff..21f57825d4 100644 --- a/synapse/storage/databases/main/schema/delta/25/fts.py +++ b/synapse/storage/databases/main/schema/delta/25/fts.py @@ -71,8 +71,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/databases/main/schema/delta/27/ts.py index b7972cfa8e..1c6058063f 100644 --- a/synapse/storage/databases/main/schema/delta/27/ts.py +++ b/synapse/storage/databases/main/schema/delta/27/ts.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_origin_server_ts", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/databases/main/schema/delta/30/as_users.py index b42c02710a..7f08fabe9f 100644 --- a/synapse/storage/databases/main/schema/delta/30/as_users.py +++ b/synapse/storage/databases/main/schema/delta/30/as_users.py @@ -59,9 +59,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): user_chunks = (user_ids[i : i + 100] for i in range(0, len(user_ids), n)) for chunk in user_chunks: cur.execute( - database_engine.convert_param_style( - "UPDATE users SET appservice_id = ? WHERE name IN (%s)" - % (",".join("?" for _ in chunk),) - ), + "UPDATE users SET appservice_id = ? WHERE name IN (%s)" + % (",".join("?" 
for _ in chunk),), [as_id] + chunk, ) diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/databases/main/schema/delta/31/pushers.py index 9bb504aad5..5be81c806a 100644 --- a/synapse/storage/databases/main/schema/delta/31/pushers.py +++ b/synapse/storage/databases/main/schema/delta/31/pushers.py @@ -65,16 +65,15 @@ def run_create(cur, database_engine, *args, **kwargs): row = list(row) row[12] = token_to_stream_ordering(row[12]) cur.execute( - database_engine.convert_param_style( - """ - INSERT into pushers2 ( - id, user_name, access_token, profile_tag, kind, - app_id, app_display_name, device_display_name, - pushkey, ts, lang, data, last_stream_ordering, last_success, - failing_since - ) values (%s)""" - % (",".join(["?" for _ in range(len(row))])) - ), + """ + INSERT into pushers2 ( + id, user_name, access_token, profile_tag, kind, + app_id, app_display_name, device_display_name, + pushkey, ts, lang, data, last_stream_ordering, last_success, + failing_since + ) values (%s) + """ + % (",".join(["?" for _ in range(len(row))])), row, ) count += 1 diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/databases/main/schema/delta/31/search_update.py index 63b757ade6..b84c844e3a 100644 --- a/synapse/storage/databases/main/schema/delta/31/search_update.py +++ b/synapse/storage/databases/main/schema/delta/31/search_update.py @@ -55,8 +55,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_search_order", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/databases/main/schema/delta/33/event_fields.py index a3e81eeac7..e928c66a8f 100644 --- a/synapse/storage/databases/main/schema/delta/33/event_fields.py +++ b/synapse/storage/databases/main/schema/delta/33/event_fields.py @@ -50,8 +50,6 @@ def run_create(cur, database_engine, *args, **kwargs): " VALUES (?, ?)" ) - sql = database_engine.convert_param_style(sql) - cur.execute(sql, ("event_fields_sender_url", progress_json)) diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py index a26057dfb6..ad875c733a 100644 --- a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py +++ b/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py @@ -23,8 +23,5 @@ def run_create(cur, database_engine, *args, **kwargs): def run_upgrade(cur, database_engine, *args, **kwargs): cur.execute( - database_engine.convert_param_style( - "UPDATE remote_media_cache SET last_access_ts = ?" 
- ), - (int(time.time() * 1000),), + "UPDATE remote_media_cache SET last_access_ts = ?", (int(time.time() * 1000),), ) diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py index 1de8b54961..bb7296852a 100644 --- a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py +++ b/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py @@ -1,6 +1,8 @@ import logging +from io import StringIO from synapse.storage.engines import PostgresEngine +from synapse.storage.prepare_database import execute_statements_from_stream logger = logging.getLogger(__name__) @@ -46,7 +48,4 @@ def run_create(cur, database_engine, *args, **kwargs): select_clause, ) - if isinstance(database_engine, PostgresEngine): - cur.execute(sql) - else: - cur.executescript(sql) + execute_statements_from_stream(cur, StringIO(sql)) diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py index 63b5acdcf7..44917f0a2e 100644 --- a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py +++ b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py @@ -68,7 +68,6 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs): INNER JOIN room_memberships AS r USING (event_id) WHERE type = 'm.room.member' AND state_key LIKE ? """ - sql = database_engine.convert_param_style(sql) cur.execute(sql, ("%:" + config.server_name,)) cur.execute( diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 4957e77f4c..459754feab 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import imp import logging import os @@ -24,9 +23,10 @@ from typing import Optional, TextIO import attr from synapse.config.homeserver import HomeServerConfig +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Cursor from synapse.types import Collection logger = logging.getLogger(__name__) @@ -67,7 +67,7 @@ UNAPPLIED_DELTA_ON_WORKER_ERROR = ( def prepare_database( - db_conn: Connection, + db_conn: LoggingDatabaseConnection, database_engine: BaseDatabaseEngine, config: Optional[HomeServerConfig], databases: Collection[str] = ["main", "state"], @@ -89,7 +89,7 @@ def prepare_database( """ try: - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="prepare_database") # sqlite does not automatically start transactions for DDL / SELECT statements, # so we start one before running anything. This ensures that any upgrades @@ -258,9 +258,7 @@ def _setup_new_database(cur, database_engine, databases): executescript(cur, entry.absolute_path) cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (max_current_ver, False), ) @@ -486,17 +484,13 @@ def _upgrade_existing_database( # Mark as done. 
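A recurring idiom in the delta scripts above is building the `VALUES` clause with one `?` placeholder per column at runtime. Isolated and runnable against `sqlite3` (table and column names invented, matching the pushers deltas in spirit):

```python
import sqlite3

row = ["id-1", "@user:example.org", "token", "tag"]

sql = "INSERT INTO pushers2 (id, user_name, access_token, profile_tag) VALUES (%s)" % (
    ",".join("?" for _ in row)
)
assert sql.endswith("VALUES (?,?,?,?)")

# With qmark paramstyle (e.g. sqlite3) the row can then be passed as-is:
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE pushers2 (id, user_name, access_token, profile_tag)")
conn.execute(sql, row)
```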
cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)" - ), + "INSERT INTO applied_schema_deltas (version, file) VALUES (?,?)", (v, relative_path), ) cur.execute("DELETE FROM schema_version") cur.execute( - database_engine.convert_param_style( - "INSERT INTO schema_version (version, upgraded) VALUES (?,?)" - ), + "INSERT INTO schema_version (version, upgraded) VALUES (?,?)", (v, True), ) @@ -532,10 +526,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) schemas to be applied """ cur.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_module_schemas WHERE module_name = ?" - ), - (modname,), + "SELECT file FROM applied_module_schemas WHERE module_name = ?", (modname,), ) applied_deltas = {d for d, in cur} for (name, stream) in names_and_streams: @@ -553,9 +544,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) # Mark as done. cur.execute( - database_engine.convert_param_style( - "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)" - ), + "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)", (modname, name), ) @@ -627,9 +616,7 @@ def _get_or_create_schema_state(txn, database_engine): if current_version: txn.execute( - database_engine.convert_param_style( - "SELECT file FROM applied_schema_deltas WHERE version >= ?" - ), + "SELECT file FROM applied_schema_deltas WHERE version >= ?", (current_version,), ) applied_deltas = [d for d, in txn] diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 2d2b560e74..970bb1b9da 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -61,3 +61,9 @@ class Connection(Protocol): def rollback(self, *args, **kwargs) -> None: ... + + def __enter__(self) -> "Connection": + ... + + def __exit__(self, exc_type, exc_value, traceback) -> bool: + ... diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index c92cd4a6ba..51f680d05d 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -54,7 +54,7 @@ def _load_current_id(db_conn, table, column, step=1): """ # debug logging for https://github.com/matrix-org/synapse/issues/7968 logger.info("initialising stream generator for %s(%s)", table, column) - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_id") if step == 1: cur.execute("SELECT MAX(%s) FROM %s" % (column, table)) else: @@ -269,7 +269,7 @@ class MultiWriterIdGenerator: def _load_current_ids( self, db_conn, table: str, instance_column: str, id_column: str ): - cur = db_conn.cursor() + cur = db_conn.cursor(txn_name="_load_current_ids") # Load the current positions of all writers for the stream. if self._writers: @@ -283,15 +283,12 @@ class MultiWriterIdGenerator: stream_name = ? AND instance_name != ALL(?) """ - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (self._stream_name, self._writers)) sql = """ SELECT instance_name, stream_id FROM stream_positions WHERE stream_name = ? 
""" - sql = self._db.engine.convert_param_style(sql) - cur.execute(sql, (self._stream_name,)) self._current_positions = { @@ -340,7 +337,6 @@ class MultiWriterIdGenerator: "instance": instance_column, "cmp": "<=" if self._positive else ">=", } - sql = self._db.engine.convert_param_style(sql) cur.execute(sql, (min_stream_id * self._return_factor,)) self._persisted_upto_position = min_stream_id diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 2dd95e2709..ff2d038ad2 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -17,6 +17,7 @@ import logging import threading from typing import Callable, List, Optional +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import ( BaseDatabaseEngine, IncorrectDatabaseSetup, @@ -53,7 +54,11 @@ class SequenceGenerator(metaclass=abc.ABCMeta): @abc.abstractmethod def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): """Should be called during start up to test that the current value of the sequence is greater than or equal to the maximum ID in the table. @@ -82,9 +87,13 @@ class PostgresSequenceGenerator(SequenceGenerator): return [i for (i,) in txn] def check_consistency( - self, db_conn: Connection, table: str, id_column: str, positive: bool = True + self, + db_conn: LoggingDatabaseConnection, + table: str, + id_column: str, + positive: bool = True, ): - txn = db_conn.cursor() + txn = db_conn.cursor(txn_name="sequence.check_consistency") # First we get the current max ID from the table. table_sql = "SELECT GREATEST(%(agg)s(%(id)s), 0) FROM %(table)s" % { diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 46f94914ff..c905a38930 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -58,7 +58,7 @@ class ApplicationServiceStoreTestCase(unittest.TestCase): # must be done after inserts database = hs.get_datastores().databases[0] self.store = ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) def tearDown(self): @@ -132,7 +132,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): db_config = hs.config.get_single_database() self.store = TestTransactionStore( - database, make_conn(db_config, self.engine), hs + database, make_conn(db_config, self.engine, "test"), hs ) def _add_service(self, url, as_token, id): @@ -448,7 +448,7 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, make_conn(database._database_config, database.engine, "test"), hs ) @defer.inlineCallbacks @@ -467,7 +467,9 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception @@ -491,7 +493,9 @@ class ApplicationServiceStoreConfigTestCase(unittest.TestCase): with self.assertRaises(ConfigError) as cm: database = hs.get_datastores().databases[0] ApplicationServiceStore( - database, 
make_conn(database._database_config, database.engine), hs + database, + make_conn(database._database_config, database.engine, "test"), + hs, ) e = cm.exception diff --git a/tests/utils.py b/tests/utils.py index 7a927c7f74..af563ffe0f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -38,6 +38,7 @@ from synapse.http.server import HttpServer from synapse.logging.context import current_context, set_current_context from synapse.server import HomeServer from synapse.storage import DataStore +from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import PostgresEngine, create_engine from synapse.storage.prepare_database import prepare_database from synapse.util.ratelimitutils import FederationRateLimiter @@ -88,6 +89,7 @@ def setupdb(): host=POSTGRES_HOST, password=POSTGRES_PASSWORD, ) + db_conn = LoggingDatabaseConnection(db_conn, db_engine, "tests") prepare_database(db_conn, db_engine, None) db_conn.close() From c5251c6fbd2722d54d33e02021f286053e611efc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 09:28:05 -0400 Subject: [PATCH 103/134] Do not assume that account data is of the correct form. (#8454) This fixes a bug where `m.ignored_user_list` was assumed to be a dict, leading to odd behavior for users who set it to something else. --- changelog.d/8454.bugfix | 1 + synapse/api/constants.py | 5 +++++ synapse/handlers/room_member.py | 6 +++--- synapse/handlers/sync.py | 19 +++++++++++-------- .../storage/databases/main/account_data.py | 9 +++++++-- synapse/visibility.py | 15 +++++++-------- 6 files changed, 34 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8454.bugfix diff --git a/changelog.d/8454.bugfix b/changelog.d/8454.bugfix new file mode 100644 index 0000000000..c06d490b6f --- /dev/null +++ b/changelog.d/8454.bugfix @@ -0,0 +1 @@ +Fix a longstanding bug where invalid ignored users in account data could break clients. 
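The fix that follows boils down to one rule: never trust the shape of user-supplied account data. The guard pattern the patch applies in `sync.py` and `visibility.py`, extracted as a sketch (the helper name is ours):

```python
from typing import Any, FrozenSet

def parse_ignored_users(account_data: Any) -> FrozenSet[str]:
    ignored: FrozenSet[str] = frozenset()
    if isinstance(account_data, dict):
        ignored_users_data = account_data.get("ignored_users", {})
        # Only a dict of user_id -> {} is valid; anything else is discarded.
        if isinstance(ignored_users_data, dict):
            ignored = frozenset(ignored_users_data.keys())
    return ignored

assert parse_ignored_users({"ignored_users": {"@evil:example.org": {}}}) == frozenset(
    {"@evil:example.org"}
)
assert parse_ignored_users({"ignored_users": ["not", "a", "dict"]}) == frozenset()
assert parse_ignored_users(None) == frozenset()
```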
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 46013cde15..592abd844b 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -155,3 +155,8 @@ class EventContentFields: class RoomEncryptionAlgorithms: MEGOLM_V1_AES_SHA2 = "m.megolm.v1.aes-sha2" DEFAULT = MEGOLM_V1_AES_SHA2 + + +class AccountDataTypes: + DIRECT = "m.direct" + IGNORED_USER_LIST = "m.ignored_user_list" diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 5ec36f591d..567a14bd0a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -22,7 +22,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union from unpaddedbase64 import encode_base64 from synapse import types -from synapse.api.constants import MAX_DEPTH, EventTypes, Membership +from synapse.api.constants import MAX_DEPTH, AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -247,7 +247,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): user_account_data, _ = await self.store.get_account_data_for_user(user_id) # Copy direct message state if applicable - direct_rooms = user_account_data.get("m.direct", {}) + direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {}) # Check which key this room is under if isinstance(direct_rooms, dict): @@ -258,7 +258,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): # Save back to user's m.direct account data await self.store.add_account_data_for_user( - user_id, "m.direct", direct_rooms + user_id, AccountDataTypes.DIRECT, direct_rooms ) break diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 260ec19b41..a998e6b7f6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -21,7 +21,7 @@ from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tup import attr from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.filtering import FilterCollection from synapse.events import EventBase from synapse.logging.context import current_context @@ -1378,13 +1378,16 @@ class SyncHandler: return set(), set(), set(), set() ignored_account_data = await self.store.get_global_account_data_by_type_for_user( - "m.ignored_user_list", user_id=user_id + AccountDataTypes.IGNORED_USER_LIST, user_id=user_id ) + # If there is ignored users account data and it matches the proper type, + # then use it. + ignored_users = frozenset() # type: FrozenSet[str] if ignored_account_data: - ignored_users = ignored_account_data.get("ignored_users", {}).keys() - else: - ignored_users = frozenset() + ignored_users_data = ignored_account_data.get("ignored_users", {}) + if isinstance(ignored_users_data, dict): + ignored_users = frozenset(ignored_users_data.keys()) if since_token: room_changes = await self._get_rooms_changed( @@ -1478,7 +1481,7 @@ class SyncHandler: return False async def _get_rooms_changed( - self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Gets the the changes that have happened since the last sync. 
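The `m.direct` handling touched above copies a room's direct-chat marking between rooms. The middle of that loop sits outside this hunk, so the following reconstructs the idea under stated assumptions; the exact upstream body may differ:

```python
def copy_direct_room(direct_rooms: dict, old_room_id: str, new_room_id: str) -> None:
    """Mirror new_room_id into whichever contact's bucket holds old_room_id."""
    if not isinstance(direct_rooms, dict):
        return  # malformed account data: leave it alone
    for room_ids in direct_rooms.values():
        if old_room_id in room_ids:
            room_ids.append(new_room_id)
            break  # matches the `break` visible in the hunk

dm = {"@friend:example.org": ["!old:example.org"]}
copy_direct_room(dm, "!old:example.org", "!new:example.org")
assert dm["@friend:example.org"] == ["!old:example.org", "!new:example.org"]
```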
""" @@ -1690,7 +1693,7 @@ class SyncHandler: return _RoomChanges(room_entries, invited, newly_joined_rooms, newly_left_rooms) async def _get_all_rooms( - self, sync_result_builder: "SyncResultBuilder", ignored_users: Set[str] + self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str] ) -> _RoomChanges: """Returns entries for all rooms for the user. @@ -1764,7 +1767,7 @@ class SyncHandler: async def _generate_room_entry( self, sync_result_builder: "SyncResultBuilder", - ignored_users: Set[str], + ignored_users: FrozenSet[str], room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index ef81d73573..49ee23470d 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -18,6 +18,7 @@ import abc import logging from typing import Dict, List, Optional, Tuple +from synapse.api.constants import AccountDataTypes from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool from synapse.storage.util.id_generators import StreamIdGenerator @@ -291,14 +292,18 @@ class AccountDataWorkerStore(SQLBaseStore, metaclass=abc.ABCMeta): self, ignored_user_id: str, ignorer_user_id: str, cache_context: _CacheContext ) -> bool: ignored_account_data = await self.get_global_account_data_by_type_for_user( - "m.ignored_user_list", + AccountDataTypes.IGNORED_USER_LIST, ignorer_user_id, on_invalidate=cache_context.invalidate, ) if not ignored_account_data: return False - return ignored_user_id in ignored_account_data.get("ignored_users", {}) + try: + return ignored_user_id in ignored_account_data.get("ignored_users", {}) + except TypeError: + # The type of the ignored_users field is invalid. + return False class AccountDataStore(AccountDataWorkerStore): diff --git a/synapse/visibility.py b/synapse/visibility.py index e3da7744d2..527365498e 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -16,7 +16,7 @@ import logging import operator -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.events.utils import prune_event from synapse.storage import Storage from synapse.storage.state import StateFilter @@ -77,15 +77,14 @@ async def filter_events_for_client( ) ignore_dict_content = await storage.main.get_global_account_data_by_type_for_user( - "m.ignored_user_list", user_id + AccountDataTypes.IGNORED_USER_LIST, user_id ) - # FIXME: This will explode if people upload something incorrect. - ignore_list = frozenset( - ignore_dict_content.get("ignored_users", {}).keys() - if ignore_dict_content - else [] - ) + ignore_list = frozenset() + if ignore_dict_content: + ignored_users_dict = ignore_dict_content.get("ignored_users", {}) + if isinstance(ignored_users_dict, dict): + ignore_list = frozenset(ignored_users_dict.keys()) erased_senders = await storage.main.are_users_erased((e.sender for e in events)) From f64c6aae68932df95a98a75fb707450260b614df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 09:40:19 -0400 Subject: [PATCH 104/134] Update manhole documentation for async/await. 
(#8462) --- changelog.d/8462.doc | 1 + docs/manhole.md | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8462.doc diff --git a/changelog.d/8462.doc b/changelog.d/8462.doc new file mode 100644 index 0000000000..cf84db6db7 --- /dev/null +++ b/changelog.d/8462.doc @@ -0,0 +1 @@ +Update the directions for using the manhole with coroutines. diff --git a/docs/manhole.md b/docs/manhole.md index 7375f5ad46..75b6ae40e0 100644 --- a/docs/manhole.md +++ b/docs/manhole.md @@ -35,9 +35,12 @@ This gives a Python REPL in which `hs` gives access to the `synapse.server.HomeServer` object - which in turn gives access to many other parts of the process. +Note that any call which returns a coroutine will need to be wrapped in `ensureDeferred`. + As a simple example, retrieving an event from the database: -``` ->>> hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org') +```pycon +>>> from twisted.internet import defer +>>> defer.ensureDeferred(hs.get_datastore().get_event('$1416420717069yeQaw:matrix.org')) > ``` From f31f8e63198cfe46af48d788dbb294aba9155e5a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 5 Oct 2020 14:43:14 +0100 Subject: [PATCH 105/134] Remove stream ordering from Metadata dict (#8452) There's no need for it to be in the dict as well as the events table. Instead, we store it in a separate attribute in the EventInternalMetadata object, and populate that on load. This means that we can rely on it being correctly populated for any event which has been persited to the database. --- changelog.d/8452.misc | 1 + synapse/events/__init__.py | 6 +++-- synapse/events/utils.py | 5 ++++ synapse/federation/sender/__init__.py | 2 ++ .../sender/per_destination_queue.py | 2 ++ synapse/handlers/federation.py | 3 +++ synapse/handlers/message.py | 4 ++- synapse/handlers/room_member.py | 13 +++++----- synapse/rest/admin/__init__.py | 5 +++- synapse/storage/databases/main/events.py | 4 +++ .../storage/databases/main/events_worker.py | 26 ++++++++++++------- synapse/storage/databases/main/stream.py | 13 ---------- synapse/storage/persist_events.py | 2 ++ 13 files changed, 53 insertions(+), 33 deletions(-) create mode 100644 changelog.d/8452.misc diff --git a/changelog.d/8452.misc b/changelog.d/8452.misc new file mode 100644 index 0000000000..8288d91c78 --- /dev/null +++ b/changelog.d/8452.misc @@ -0,0 +1 @@ +Remove redundant databae loads of stream_ordering for events we already have. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index dc49df0812..7a51d0a22f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -97,13 +97,16 @@ class DefaultDictProperty(DictProperty): class _EventInternalMetadata: - __slots__ = ["_dict"] + __slots__ = ["_dict", "stream_ordering"] def __init__(self, internal_metadata_dict: JsonDict): # we have to copy the dict, because it turns out that the same dict is # reused. TODO: fix that self._dict = dict(internal_metadata_dict) + # the stream ordering of this event. None, until it has been persisted. 
+ self.stream_ordering = None # type: Optional[int] + outlier = DictProperty("outlier") # type: bool out_of_band_membership = DictProperty("out_of_band_membership") # type: bool send_on_behalf_of = DictProperty("send_on_behalf_of") # type: str @@ -113,7 +116,6 @@ class _EventInternalMetadata: redacted = DictProperty("redacted") # type: bool txn_id = DictProperty("txn_id") # type: str token_id = DictProperty("token_id") # type: str - stream_ordering = DictProperty("stream_ordering") # type: int # XXX: These are set by StreamWorkerStore._set_before_and_after. # I'm pretty sure that these are never persisted to the database, so shouldn't diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 32c73d3413..355cbe05f1 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -49,6 +49,11 @@ def prune_event(event: EventBase) -> EventBase: pruned_event_dict, event.room_version, event.internal_metadata.get_dict() ) + # copy the internal fields + pruned_event.internal_metadata.stream_ordering = ( + event.internal_metadata.stream_ordering + ) + # Mark the event as redacted pruned_event.internal_metadata.redacted = True diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8bb17b3a05..e33b29a42c 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -297,6 +297,8 @@ class FederationSender: sent_pdus_destination_dist_total.inc(len(destinations)) sent_pdus_destination_dist_count.inc() + assert pdu.internal_metadata.stream_ordering + # track the fact that we have a PDU for these destinations, # to allow us to perform catch-up later on if the remote is unreachable # for a while. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index bc99af3fdd..db8e456fe8 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -158,6 +158,7 @@ class PerDestinationQueue: # yet know if we have anything to catch up (None) self._pending_pdus.append(pdu) else: + assert pdu.internal_metadata.stream_ordering self._catchup_last_skipped = pdu.internal_metadata.stream_ordering self.attempt_new_transaction() @@ -361,6 +362,7 @@ class PerDestinationQueue: last_successful_stream_ordering = ( final_pdu.internal_metadata.stream_ordering ) + assert last_successful_stream_ordering await self._store.set_destination_last_successful_stream_ordering( self._destination, last_successful_stream_ordering ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1a8144405a..5ac2fc5656 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -3008,6 +3008,9 @@ class FederationHandler(BaseHandler): elif event.internal_metadata.is_outlier(): return + # the event has been persisted so it should have a stream ordering. 
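The shape of this change: `stream_ordering` moves out of the serialised metadata dict into a plain attribute that stays `None` until persistence assigns it, so later code can assert on it rather than re-query the database. A cut-down sketch of that pattern (names simplified from the real `_EventInternalMetadata`):

```python
from typing import Optional


class InternalMetadata:
    """Per-event bookkeeping that is not part of the serialised dict."""

    __slots__ = ["_dict", "stream_ordering"]

    def __init__(self, internal_metadata_dict: dict):
        # copy, since callers may reuse the same dict between events
        self._dict = dict(internal_metadata_dict)
        # None until the event has been written to the events table
        self.stream_ordering = None  # type: Optional[int]


meta = InternalMetadata({"outlier": False})
assert meta.stream_ordering is None

# persistence assigns the ordering exactly once, on write
meta.stream_ordering = 12345

# downstream code can now rely on it without a database round-trip
assert meta.stream_ordering == 12345
```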
+ assert event.internal_metadata.stream_ordering + event_pos = PersistedEventPosition( self._instance_name, event.internal_metadata.stream_ordering ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ee271e85e5..00513fbf37 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -682,7 +682,9 @@ class EventCreationHandler: event.event_id, prev_event.event_id, ) - return await self.store.get_stream_id_for_event(prev_event.event_id) + # we know it was persisted, so must have a stream ordering + assert prev_event.internal_metadata.stream_ordering + return prev_event.internal_metadata.stream_ordering return await self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 567a14bd0a..13b749b7cb 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -194,8 +194,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): ) if duplicate is not None: # Discard the new event since this membership change is a no-op. - _, stream_id = await self.store.get_event_ordering(duplicate.event_id) - return duplicate.event_id, stream_id + # we know it was persisted, so must have a stream ordering. + assert duplicate.internal_metadata.stream_ordering + return duplicate.event_id, duplicate.internal_metadata.stream_ordering prev_state_ids = await context.get_prev_state_ids() @@ -441,12 +442,12 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): same_membership = old_membership == effective_membership_state same_sender = requester.user.to_string() == old_state.sender if same_sender and same_membership and same_content: - _, stream_id = await self.store.get_event_ordering( - old_state.event_id - ) + # duplicate event. + # we know it was persisted, so must have a stream ordering. 
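Both membership call sites now read the ordering straight off the already-persisted duplicate instead of issuing a `get_event_ordering` query. A hedged sketch of that control flow, with stand-in types (`Event` here is a simplification, and the persistence step is faked):

```python
from typing import Optional, Tuple


class Event:
    def __init__(self, event_id: str, stream_ordering: Optional[int] = None):
        self.event_id = event_id
        self.stream_ordering = stream_ordering


def send_membership_event(
    new_event: Event, duplicate: Optional[Event]
) -> Tuple[str, int]:
    """Return (event_id, stream_ordering), reusing a persisted duplicate."""
    if duplicate is not None:
        # the duplicate has been persisted, so its ordering must be set;
        # assert instead of running a database query for it
        assert duplicate.stream_ordering is not None
        return duplicate.event_id, duplicate.stream_ordering

    new_event.stream_ordering = 42  # in reality assigned during persistence
    return new_event.event_id, new_event.stream_ordering


assert send_membership_event(Event("$new"), Event("$dup", 7)) == ("$dup", 7)
assert send_membership_event(Event("$new"), None) == ("$new", 42)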
+ assert old_state.internal_metadata.stream_ordering return ( old_state.event_id, - stream_id, + old_state.internal_metadata.stream_ordering, ) if old_membership in ["ban", "leave"] and action == "kick": diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 57cac22252..789431ef25 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -57,6 +57,7 @@ from synapse.rest.admin.users import ( UsersRestServletV2, WhoisRestServlet, ) +from synapse.types import RoomStreamToken from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) @@ -109,7 +110,9 @@ class PurgeHistoryRestServlet(RestServlet): if event.room_id != room_id: raise SynapseError(400, "Event is for wrong room.") - room_token = await self.store.get_topological_token_for_event(event_id) + room_token = RoomStreamToken( + event.depth, event.internal_metadata.stream_ordering + ) token = await room_token.to_string(self.store) logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 78e645592f..b4abd961b9 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -331,6 +331,10 @@ class PersistEventsStore: min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering + # stream orderings should have been assigned by now + assert min_stream_order + assert max_stream_order + self._update_forward_extremities_txn( txn, new_forward_extremities=new_forward_extremeties, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 723ced4ff0..b7ed8ca6ab 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -723,6 +723,7 @@ class EventsWorkerStore(SQLBaseStore): internal_metadata_dict=internal_metadata, rejected_reason=rejected_reason, ) + original_ev.internal_metadata.stream_ordering = row["stream_ordering"] event_map[event_id] = original_ev @@ -790,6 +791,8 @@ class EventsWorkerStore(SQLBaseStore): * event_id (str) + * stream_ordering (int): stream ordering for this event + * json (str): json-encoded event structure * internal_metadata (str): json-encoded internal metadata dict @@ -822,13 +825,15 @@ class EventsWorkerStore(SQLBaseStore): sql = """\ SELECT e.event_id, - e.internal_metadata, - e.json, - e.format_version, + e.stream_ordering, + ej.internal_metadata, + ej.json, + ej.format_version, r.room_version, rej.reason - FROM event_json as e - LEFT JOIN rooms r USING (room_id) + FROM events AS e + JOIN event_json AS ej USING (event_id) + LEFT JOIN rooms r ON r.room_id = e.room_id LEFT JOIN rejections as rej USING (event_id) WHERE """ @@ -842,11 +847,12 @@ class EventsWorkerStore(SQLBaseStore): event_id = row[0] event_dict[event_id] = { "event_id": event_id, - "internal_metadata": row[1], - "json": row[2], - "format_version": row[3], - "room_version_id": row[4], - "rejected_reason": row[5], + "stream_ordering": row[1], + "internal_metadata": row[2], + "json": row[3], + "format_version": row[4], + "room_version_id": row[5], + "rejected_reason": row[6], "redactions": [], } diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 1d27439536..a94bec1ac5 100644 --- a/synapse/storage/databases/main/stream.py +++ 
b/synapse/storage/databases/main/stream.py @@ -589,19 +589,6 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): ) return "t%d-%d" % (topo, token) - async def get_stream_id_for_event(self, event_id: str) -> int: - """The stream ID for an event - Args: - event_id: The id of the event to look up a stream token for. - Raises: - StoreError if the event wasn't in the database. - Returns: - A stream ID. - """ - return await self.db_pool.runInteraction( - "get_stream_id_for_event", self.get_stream_id_for_event_txn, event_id, - ) - def get_stream_id_for_event_txn( self, txn: LoggingTransaction, event_id: str, allow_none=False, ) -> int: diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 72939f3984..4d2d88d1f0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -248,6 +248,8 @@ class EventsPersistenceStorage: await make_deferred_yieldable(deferred) event_stream_id = event.internal_metadata.stream_ordering + # stream ordering should have been assigned by now + assert event_stream_id pos = PersistedEventPosition(self._instance_name, event_stream_id) return pos, self.main_store.get_room_max_token() From 0991a2da93b6b2010e6ef8f732ffdc3b5b382bab Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 5 Oct 2020 14:57:46 +0100 Subject: [PATCH 106/134] Allow ThirdPartyEventRules modules to manipulate public room state (#8292) This PR allows `ThirdPartyEventRules` modules to view, manipulate and block changes to the state of whether a room is published in the public rooms directory. While the idea of whether a room is in the public rooms list is not kept within an event in the room, `ThirdPartyEventRules` modules generally deal with controlling which modifications can happen to a room. Public room publication fits within that idea, even if its toggle state isn't controlled through a state event. --- UPGRADE.rst | 17 +++++++ changelog.d/8292.feature | 1 + synapse/events/third_party_rules.py | 51 +++++++++++++++++--- synapse/handlers/directory.py | 10 ++++ synapse/handlers/room.py | 9 ++++ synapse/module_api/__init__.py | 67 ++++++++++++++++++++++++++ tests/module_api/test_api.py | 56 ++++++++++++++++++++- tests/rest/client/third_party_rules.py | 31 +++++++----- 8 files changed, 223 insertions(+), 19 deletions(-) create mode 100644 changelog.d/8292.feature diff --git a/UPGRADE.rst b/UPGRADE.rst index 49e86e628f..5a68312217 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -75,6 +75,23 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.22.0 +==================== + +ThirdPartyEventRules breaking changes +------------------------------------- + +This release introduces a backwards-incompatible change to modules making use of +``ThirdPartyEventRules`` in Synapse. If you make use of a module defined under the +``third_party_event_rules`` config option, please make sure it is updated to handle +the below change: + +The ``http_client`` argument is no longer passed to modules as they are initialised. Instead, +modules are expected to make use of the ``http_client`` property on the ``ModuleApi`` class. +Modules are now passed a ``module_api`` argument during initialisation, which is an instance of +``ModuleApi``.
``ModuleApi`` instances have a ``http_client`` property which acts the same as +the ``http_client`` argument previously passed to ``ThirdPartyEventRules`` modules. + Upgrading to v1.21.0 ==================== diff --git a/changelog.d/8292.feature b/changelog.d/8292.feature new file mode 100644 index 0000000000..6d0335e2c8 --- /dev/null +++ b/changelog.d/8292.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. \ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 9d5310851c..fed459198a 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -12,10 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.types import Requester +from synapse.module_api import ModuleApi +from synapse.types import Requester, StateMap class ThirdPartyEventRules: @@ -38,7 +40,7 @@ class ThirdPartyEventRules: if module is not None: self.third_party_rules = module( - config=config, http_client=hs.get_simple_http_client() + config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), ) async def check_event_allowed( @@ -106,6 +108,46 @@ class ThirdPartyEventRules: if self.third_party_rules is None: return True + state_events = await self._get_state_map_for_room(room_id) + + ret = await self.third_party_rules.check_threepid_can_be_invited( + medium, address, state_events + ) + return ret + + async def check_visibility_can_be_modified( + self, room_id: str, new_visibility: str + ) -> bool: + """Check if a room is allowed to be published to, or removed from, the public room + list. + + Args: + room_id: The ID of the room. + new_visibility: The new visibility state. Either "public" or "private". + + Returns: + True if the room's visibility can be modified, False if not. + """ + if self.third_party_rules is None: + return True + + check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + if not check_func or not isinstance(check_func, Callable): + return True + + state_events = await self._get_state_map_for_room(room_id) + + return await check_func(room_id, state_events, new_visibility) + + async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]: + """Given a room ID, return the state events of that room. + + Args: + room_id: The ID of the room. + + Returns: + A dict mapping (event type, state key) to state event. 
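The helper body that follows does a two-step resolve: fetch the `(event type, state key) -> event_id` map, bulk-load those events, then re-key the results. The same shape in plain Python, with dicts standing in for event objects:

```python
from typing import Dict, Tuple

StateKey = Tuple[str, str]  # (event type, state key)

# shape of get_filtered_current_state_ids: state key -> event id
state_ids: Dict[StateKey, str] = {
    ("m.room.create", ""): "$create",
    ("m.room.member", "@alice:example.com"): "$alice_join",
}

# shape of a bulk get_events: event id -> event (plain dicts here)
events = {
    "$create": {"type": "m.room.create"},
    "$alice_join": {"type": "m.room.member"},
}

# one bulk load re-keyed by state key, instead of one get_event per entry
state_events = {key: events[event_id] for key, event_id in state_ids.items()}

assert state_events[("m.room.create", "")]["type"] == "m.room.create"
```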
+ """ state_ids = await self.store.get_filtered_current_state_ids(room_id) room_state_events = await self.store.get_events(state_ids.values()) @@ -113,7 +155,4 @@ class ThirdPartyEventRules: for key, event_id in state_ids.items(): state_events[key] = room_state_events[event_id] - ret = await self.third_party_rules.check_threepid_can_be_invited( - medium, address, state_events - ) - return ret + return state_events diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 6f15c68240..ad5683d251 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -46,6 +46,7 @@ class DirectoryHandler(BaseHandler): self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search self.require_membership = hs.config.require_membership_for_aliases + self.third_party_event_rules = hs.get_third_party_event_rules() self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -454,6 +455,15 @@ class DirectoryHandler(BaseHandler): # per alias creation rule? raise SynapseError(403, "Not allowed to publish room") + # Check if publishing is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Not allowed to publish room") + await self.store.set_room_is_public(room_id, making_public) async def edit_published_appservice_room_list( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f1a6699cd4..f14f791586 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -681,6 +681,15 @@ class RoomCreationHandler(BaseHandler): creator_id=user_id, is_public=is_public, room_version=room_version, ) + # Check whether this visibility value is blocked by a third party module + allowed_by_third_party_rules = await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) + ) + if not allowed_by_third_party_rules: + raise SynapseError(403, "Room visibility value not allowed.") + directory_handler = self.hs.get_handlers().directory_handler if room_alias: await directory_handler.create_association( diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index fcbd5378c4..646f09d2bc 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,18 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import TYPE_CHECKING from twisted.internet import defer +from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import UserID +if TYPE_CHECKING: + from synapse.server import HomeServer + """ This package defines the 'stable' API which can be used by extension modules which are loaded into Synapse. @@ -43,6 +48,27 @@ class ModuleApi: self._auth = hs.get_auth() self._auth_handler = auth_handler + # We expose these as properties below in order to attach a helpful docstring. + self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient + self._public_room_list_manager = PublicRoomListManager(hs) + + @property + def http_client(self): + """Allows making outbound HTTP requests to remote resources. 
+ + An instance of synapse.http.client.SimpleHttpClient + """ + return self._http_client + + @property + def public_room_list_manager(self): + """Allows adding to, removing from and checking the status of rooms in the + public room list. + + An instance of synapse.module_api.PublicRoomListManager + """ + return self._public_room_list_manager + def get_user_by_req(self, req, allow_guest=False): """Check the access_token provided for a request @@ -266,3 +292,44 @@ class ModuleApi: await self._auth_handler.complete_sso_login( registered_user_id, request, client_redirect_url, ) + + +class PublicRoomListManager: + """Contains methods for adding to, removing from and querying whether a room + is in the public room list. + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def room_is_in_public_room_list(self, room_id: str) -> bool: + """Checks whether a room is in the public room list. + + Args: + room_id: The ID of the room. + + Returns: + Whether the room is in the public room list. Returns False if the room does + not exist. + """ + room = await self._store.get_room(room_id) + if not room: + return False + + return room.get("is_public", False) + + async def add_room_to_public_room_list(self, room_id: str) -> None: + """Publishes a room to the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, True) + + async def remove_room_from_public_room_list(self, room_id: str) -> None: + """Removes a room from the public room list. + + Args: + room_id: The ID of the room. + """ + await self._store.set_room_is_public(room_id, False) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 04de0b9dbe..54600ad983 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,13 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, room from tests.unittest import HomeserverTestCase class ModuleApiTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) @@ -52,3 +59,50 @@ class ModuleApiTestCase(HomeserverTestCase): # Check that the displayname was assigned displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") + + def test_public_rooms(self): + """Tests that a room can be added and removed from the public rooms list, + as well as have its public rooms directory state queried. + """ + # Create a user and room to play with + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + room_id = self.helper.create_room_as(user_id, tok=tok) + + # The room should not currently be in the public rooms directory + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) + + # Let's try adding it to the public rooms directory + self.get_success( + self.module_api.public_room_list_manager.add_room_to_public_room_list( + room_id + ) + ) + + # And checking whether it's in there... 
+ is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertTrue(is_in_public_rooms) + + # Let's remove it again + self.get_success( + self.module_api.public_room_list_manager.remove_room_from_public_room_list( + room_id + ) + ) + + # Should be gone + is_in_public_rooms = self.get_success( + self.module_api.public_room_list_manager.room_is_in_public_room_list( + room_id + ) + ) + self.assertFalse(is_in_public_rooms) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py index 8c24add530..715e87de08 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/third_party_rules.py @@ -12,18 +12,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import Requester from tests import unittest class ThirdPartyRulesTestModule: - def __init__(self, config): + def __init__(self, config, *args, **kwargs): pass - def check_event_allowed(self, event, context): + async def on_create_room( + self, requester: Requester, config: dict, is_requester_admin: bool + ): + return True + + async def check_event_allowed(self, event, context): if event.type == "foo.bar.forbidden": return False else: @@ -51,29 +56,31 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.hs = self.setup_test_homeserver(config=config) return self.hs + def prepare(self, reactor, clock, homeserver): + # Create a user and room to play with during the tests + self.user_id = self.register_user("kermit", "monkey") + self.tok = self.login("kermit", "monkey") + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + def test_third_party_rules(self): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - - room_id = self.helper.create_room_as(user_id, tok=tok) - request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, {}, - access_token=tok, + access_token=self.tok, ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) From da11cc22be37e8858c19774779ad7d02d64a458c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 5 Oct 2020 10:24:17 -0400 Subject: [PATCH 107/134] Ensure that event.redacts is the proper type before handling it (#8457) This fixes a bug when backfilling invalid events. --- changelog.d/8457.bugfix | 1 + synapse/event_auth.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/8457.bugfix diff --git a/changelog.d/8457.bugfix b/changelog.d/8457.bugfix new file mode 100644 index 0000000000..545b06d180 --- /dev/null +++ b/changelog.d/8457.bugfix @@ -0,0 +1 @@ +Fix a bug where backfilling a room with an event that was missing the `redacts` field would break. 
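The guard that follows is only two lines, but worth seeing in isolation: domain-comparison helpers assume `redacts` is a string and crash on anything else, so checking the type first turns a crash on a malformed backfilled event into a clean rejection. A sketch under that assumption (`get_domain_from_id` below is a simplified stand-in for Synapse's helper):

```python
def get_domain_from_id(matrix_id: str) -> str:
    # "$event_id:example.com" -> "example.com"; raises on non-strings
    return matrix_id.split(":", 1)[1]


def redaction_domains_match(event_id: str, redacts: object) -> bool:
    """V1-format check: a server may redact its own events."""
    if not isinstance(redacts, str):
        # malformed backfilled event: reject cleanly rather than crash
        return False
    return get_domain_from_id(event_id) == get_domain_from_id(redacts)


assert redaction_domains_match("$a:example.com", "$b:example.com")
assert not redaction_domains_match("$a:example.com", None)
assert not redaction_domains_match("$a:example.com", {"bad": "data"})
```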
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 8c907ad596..56f8dc9caf 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -446,6 +446,8 @@ def check_redaction( if room_version_obj.event_format == EventFormatVersions.V1: redacter_domain = get_domain_from_id(event.event_id) + if not isinstance(event.redacts, str): + return False redactee_domain = get_domain_from_id(event.redacts) if redacter_domain == redactee_domain: return True From b520a1bf5a272b04473f485def18a9e6f6e4c3b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 16:45:41 +0100 Subject: [PATCH 108/134] De-duplicate duplicate handling move the "duplicate state event" handling down into `handle_new_client_event` where it can be shared between multiple call paths. --- synapse/handlers/message.py | 41 ++++++++++++++++++--------------- synapse/handlers/room_member.py | 29 ++++++++--------------- 2 files changed, 32 insertions(+), 38 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 00513fbf37..ea8e3517d7 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -674,22 +674,14 @@ class EventCreationHandler: assert self.hs.is_mine(user), "User must be our own: %s" % (user,) - if event.is_state(): - prev_event = await self.deduplicate_state_event(event, context) - if prev_event is not None: - logger.info( - "Not bothering to persist state event %s duplicated by %s", - event.event_id, - prev_event.event_id, - ) - # we know it was persisted, so must have a stream ordering - assert prev_event.internal_metadata.stream_ordering - return prev_event.internal_metadata.stream_ordering - - return await self.handle_new_client_event( + ev = await self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit ) + # we know it was persisted, so must have a stream ordering + assert ev.internal_metadata.stream_ordering + return ev.internal_metadata.stream_ordering + async def deduplicate_state_event( self, event: EventBase, context: EventContext ) -> Optional[EventBase]: @@ -845,8 +837,10 @@ class EventCreationHandler: context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], - ) -> int: - """Processes a new event. This includes checking auth, persisting it, + ) -> EventBase: + """Processes a new event. + + This includes deduplicating, checking auth, persisting, notifying users, sending to remote servers, etc. If called from a worker will hit out to the master process for final @@ -860,9 +854,20 @@ class EventCreationHandler: extra_users: Any extra users to notify about event Return: - The stream_id of the persisted event. + If the event was deduplicated, the previous, duplicate, event. Otherwise, + `event`. 
""" + if event.is_state(): + prev_event = await self.deduplicate_state_event(event, context) + if prev_event is not None: + logger.info( + "Not bothering to persist state event %s duplicated by %s", + event.event_id, + prev_event.event_id, + ) + return prev_event + if event.is_state() and (event.type, event.state_key) == ( EventTypes.Create, "", @@ -917,13 +922,13 @@ class EventCreationHandler: ) stream_id = result["stream_id"] event.internal_metadata.stream_ordering = stream_id - return stream_id + return event stream_id = await self.persist_and_notify_client_event( requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - return stream_id + return event except Exception: # Ensure that we actually remove the entries in the push actions # staging area, if we calculated them. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 13b749b7cb..fd8114a64d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -188,16 +188,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): require_consent=require_consent, ) - # Check if this event matches the previous membership event for the user. - duplicate = await self.event_creation_handler.deduplicate_state_event( - event, context - ) - if duplicate is not None: - # Discard the new event since this membership change is a no-op. - # we know it was persisted, so must have a stream ordering. - assert duplicate.internal_metadata.stream_ordering - return duplicate.event_id, duplicate.internal_metadata.stream_ordering - prev_state_ids = await context.get_prev_state_ids() prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) @@ -222,7 +212,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): retry_after_ms=int(1000 * (time_allowed - time_now_s)) ) - stream_id = await self.event_creation_handler.handle_new_client_event( + result_event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target], ratelimit=ratelimit, ) @@ -232,7 +222,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): if prev_member_event.membership == Membership.JOIN: await self._user_left_room(target, room_id) - return event.event_id, stream_id + # we know it was persisted, so should have a stream ordering + assert result_event.internal_metadata.stream_ordering + return result_event.event_id, result_event.internal_metadata.stream_ordering async def copy_room_tags_and_direct_to_room( self, old_room_id, new_room_id, user_id @@ -673,12 +665,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta): else: requester = types.create_requester(target_user) - prev_event = await self.event_creation_handler.deduplicate_state_event( - event, context - ) - if prev_event is not None: - return - prev_state_ids = await context.get_prev_state_ids() if event.membership == Membership.JOIN: if requester.is_guest: @@ -1186,10 +1172,13 @@ class RoomMemberMasterHandler(RoomMemberHandler): context = await self.state_handler.compute_event_context(event) context.app_service = requester.app_service - stream_id = await self.event_creation_handler.handle_new_client_event( + result_event = await self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[UserID.from_string(target_user)], ) - return event.event_id, stream_id + # we know it was persisted, so must have a stream ordering + assert result_event.internal_metadata.stream_ordering + + return result_event.event_id, result_event.internal_metadata.stream_ordering async def 
_user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room From 2ee302d0164b3e0495c3cd1ebb6b906fd3e04e27 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 18:03:21 +0100 Subject: [PATCH 109/134] Move shadow-ban check down into `handle_new_client_event`. --- synapse/handlers/message.py | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ea8e3517d7..8852db4eaf 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -657,25 +657,23 @@ class EventCreationHandler: Return: The stream_id of the persisted event. - Raises: - ShadowBanError if the requester has been shadow-banned. + """ if event.type == EventTypes.Member: raise SynapseError( 500, "Tried to send member event through non-member codepath" ) - if not ignore_shadow_ban and requester.shadow_banned: - # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) - raise ShadowBanError() - user = UserID.from_string(event.sender) assert self.hs.is_mine(user), "User must be our own: %s" % (user,) ev = await self.handle_new_client_event( - requester=requester, event=event, context=context, ratelimit=ratelimit + requester=requester, + event=event, + context=context, + ratelimit=ratelimit, + ignore_shadow_ban=ignore_shadow_ban, ) # we know it was persisted, so must have a stream ordering @@ -837,6 +835,7 @@ class EventCreationHandler: context: EventContext, ratelimit: bool = True, extra_users: List[UserID] = [], + ignore_shadow_ban: bool = False, ) -> EventBase: """Processes a new event. @@ -853,11 +852,28 @@ class EventCreationHandler: ratelimit extra_users: Any extra users to notify about event + ignore_shadow_ban: True if shadow-banned users should be allowed to + send this event. + Return: If the event was deduplicated, the previous, duplicate, event. Otherwise, `event`. + + Raises: + ShadowBanError if the requester has been shadow-banned. """ + # we don't apply shadow-banning to membership events, so that the user + # can come and go as they want. + if ( + event.type != EventTypes.Member + and not ignore_shadow_ban + and requester.shadow_banned + ): + # We randomly sleep a bit just to annoy the requester. 
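A compressed, standalone version of the check as it now lives in one place, using plain `asyncio` in place of Synapse's `Clock` wrapper:

```python
import asyncio
import random


class ShadowBanError(Exception):
    """Raised instead of sending; the client just sees a slow failure."""


async def check_shadow_ban(
    event_type: str, shadow_banned: bool, ignore_shadow_ban: bool = False
) -> None:
    # membership events are exempt, so a shadow-banned user can still
    # join and leave rooms without noticing anything is wrong
    if event_type == "m.room.member" or ignore_shadow_ban or not shadow_banned:
        return
    # sleep 1-10 seconds before failing, so the rejection looks like
    # ordinary server slowness rather than a deliberate block
    await asyncio.sleep(random.randint(1, 10))
    raise ShadowBanError()


# the fast paths return immediately
asyncio.run(check_shadow_ban("m.room.message", shadow_banned=False))
asyncio.run(check_shadow_ban("m.room.member", shadow_banned=True))
```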
+ await self.clock.sleep(random.randint(1, 10)) + raise ShadowBanError() + if event.is_state(): prev_event = await self.deduplicate_state_event(event, context) if prev_event is not None: From fd0282201e8876e3a860766fff89561564041260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 19:00:50 +0100 Subject: [PATCH 110/134] pull up event.sender assertion --- synapse/handlers/message.py | 8 ++++---- synapse/handlers/room.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8852db4eaf..5969a7130f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -664,10 +664,6 @@ class EventCreationHandler: 500, "Tried to send member event through non-member codepath" ) - user = UserID.from_string(event.sender) - - assert self.hs.is_mine(user), "User must be our own: %s" % (user,) - ev = await self.handle_new_client_event( requester=requester, event=event, @@ -748,6 +744,10 @@ class EventCreationHandler: requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id ) + assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( + event.sender, + ) + spam_error = self.spam_checker.check_event_for_spam(event) if spam_error: if not isinstance(spam_error, str): diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index f14f791586..530bf0ab88 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -185,6 +185,7 @@ class RoomCreationHandler(BaseHandler): ShadowBanError if the requester is shadow-banned. """ user_id = requester.user.to_string() + assert self.hs.is_mine_id(user_id), "User must be our own: %s" % (user_id,) # start by allocating a new room id r = await self.store.get_room(old_room_id) From e775b5bb5bb2b9e42607f9514ae2e270270a8932 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 2 Oct 2020 18:10:55 +0100 Subject: [PATCH 111/134] kill off `send_nonmember_event` This is now redundant, and we can just call `handle_new_client_event` directly. --- synapse/handlers/message.py | 74 +++++++++++---------------------- synapse/handlers/room.py | 4 +- tests/handlers/test_register.py | 2 +- tests/unittest.py | 4 +- 4 files changed, 31 insertions(+), 53 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5969a7130f..6d136930bf 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -635,47 +635,6 @@ class EventCreationHandler: msg = self._block_events_without_consent_error % {"consent_uri": consent_uri} raise ConsentNotGivenError(msg=msg, consent_uri=consent_uri) - async def send_nonmember_event( - self, - requester: Requester, - event: EventBase, - context: EventContext, - ratelimit: bool = True, - ignore_shadow_ban: bool = False, - ) -> int: - """ - Persists and notifies local clients and federation of an event. - - Args: - requester: The requester sending the event. - event: The event to send. - context: The context of the event. - ratelimit: Whether to rate limit this send. - ignore_shadow_ban: True if shadow-banned users should be allowed to - send this event. - - Return: - The stream_id of the persisted event. 
- - - """ - if event.type == EventTypes.Member: - raise SynapseError( - 500, "Tried to send member event through non-member codepath" - ) - - ev = await self.handle_new_client_event( - requester=requester, - event=event, - context=context, - ratelimit=ratelimit, - ignore_shadow_ban=ignore_shadow_ban, - ) - - # we know it was persisted, so must have a stream ordering - assert ev.internal_metadata.stream_ordering - return ev.internal_metadata.stream_ordering - async def deduplicate_state_event( self, event: EventBase, context: EventContext ) -> Optional[EventBase]: @@ -716,7 +675,7 @@ class EventCreationHandler: """ Creates an event, then sends it. - See self.create_event and self.send_nonmember_event. + See self.create_event and self.handle_new_client_event. Args: requester: The requester sending the event. @@ -726,9 +685,19 @@ class EventCreationHandler: ignore_shadow_ban: True if shadow-banned users should be allowed to send this event. + Returns: + The event, and its stream ordering (if state event deduplication happened, + the previous, duplicate event). + Raises: ShadowBanError if the requester has been shadow-banned. """ + + if event_dict["type"] == EventTypes.Member: + raise SynapseError( + 500, "Tried to send member event through non-member codepath" + ) + if not ignore_shadow_ban and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. await self.clock.sleep(random.randint(1, 10)) @@ -754,14 +723,17 @@ class EventCreationHandler: spam_error = "Spam is not permitted here" raise SynapseError(403, spam_error, Codes.FORBIDDEN) - stream_id = await self.send_nonmember_event( - requester, - event, - context, + ev = await self.handle_new_client_event( + requester=requester, + event=event, + context=context, ratelimit=ratelimit, ignore_shadow_ban=ignore_shadow_ban, ) - return event, stream_id + + # we know it was persisted, so must have a stream ordering + assert ev.internal_metadata.stream_ordering + return ev, ev.internal_metadata.stream_ordering @measure_func("create_new_client_event") async def create_new_client_event( @@ -1255,8 +1227,12 @@ class EventCreationHandler: # Since this is a dummy-event it is OK if it is sent by a # shadow-banned user. 
- await self.send_nonmember_event( - requester, event, context, ratelimit=False, ignore_shadow_ban=True, + await self.handle_new_client_event( + requester=requester, + event=event, + context=context, + ratelimit=False, + ignore_shadow_ban=True, ) return True except ConsentNotGivenError: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 530bf0ab88..d0530a446c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -230,8 +230,8 @@ class RoomCreationHandler(BaseHandler): ) # now send the tombstone - await self.event_creation_handler.send_nonmember_event( - requester, tombstone_event, tombstone_context + await self.event_creation_handler.handle_new_client_event( + requester=requester, event=tombstone_event, context=tombstone_context, ) old_room_state = await tombstone_context.get_current_state_ids() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index cb7c0ed51a..702c6aa089 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -413,7 +413,7 @@ class RegistrationTestCase(unittest.HomeserverTestCase): ) ) self.get_success( - event_creation_handler.send_nonmember_event(requester, event, context) + event_creation_handler.handle_new_client_event(requester, event, context) ) # Register a second user, which won't be be in the room (or even have an invite) diff --git a/tests/unittest.py b/tests/unittest.py index 82ede9de34..5c87f6097e 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -608,7 +608,9 @@ class HomeserverTestCase(TestCase): if soft_failed: event.internal_metadata.soft_failed = True - self.get_success(event_creator.send_nonmember_event(requester, event, context)) + self.get_success( + event_creator.handle_new_client_event(requester, event, context) + ) return event.event_id From 103f72929aecfc4d60527590e3bb4d7e3677ef53 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 14:51:08 +0100 Subject: [PATCH 112/134] changelog --- changelog.d/8463.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/8463.misc diff --git a/changelog.d/8463.misc b/changelog.d/8463.misc new file mode 100644 index 0000000000..040c9bb90f --- /dev/null +++ b/changelog.d/8463.misc @@ -0,0 +1 @@ +Reduce inconsistencies between codepaths for membership and non-membership events. From 4cd1448d0e16d19a1f255ed6746a7372221e84cd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 5 Oct 2020 20:27:14 +0100 Subject: [PATCH 113/134] Fix third-party event modules for `check_visibility_can_be_modified` check PR #8292 tried to maintain backwards compat with modules which don't provide a `check_visibility_can_be_modified` method, but the tests weren't being run, and the check didn't work. --- changelog.d/8467.feature | 1 + synapse/events/third_party_rules.py | 4 +++- .../{third_party_rules.py => test_third_party_rules.py} | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/8467.feature rename tests/rest/client/{third_party_rules.py => test_third_party_rules.py} (96%) diff --git a/changelog.d/8467.feature b/changelog.d/8467.feature new file mode 100644 index 0000000000..6d0335e2c8 --- /dev/null +++ b/changelog.d/8467.feature @@ -0,0 +1 @@ +Allow `ThirdPartyEventRules` modules to query and manipulate whether a room is in the public rooms directory. 
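The bug the next hunk fixes: `getattr` with only two arguments raises `AttributeError` when a module doesn't define the hook, so the supposedly optional callback was effectively mandatory. Passing a `None` default (and tolerating non-callable values) restores backwards compatibility. The pattern distilled, using the builtin `callable` where the patch uses an `isinstance` check (both module classes here are invented for illustration):

```python
from typing import Any, Callable, Optional


class LegacyModule:
    """Predates the new hook: defines no extra methods at all."""


class NewModule:
    def check_visibility_can_be_modified(self, room_id: str) -> bool:
        return False  # veto every visibility change


def resolve_hook(module: Any, name: str) -> Optional[Callable]:
    # getattr with a None default: a missing hook means "allow", instead
    # of the AttributeError the two-argument form raised
    hook = getattr(module, name, None)
    if hook is None or not callable(hook):
        return None
    return hook


assert resolve_hook(LegacyModule(), "check_visibility_can_be_modified") is None
assert resolve_hook(NewModule(), "check_visibility_can_be_modified") is not None
```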
\ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index fed459198a..1ca77519d5 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -131,7 +131,9 @@ class ThirdPartyEventRules: if self.third_party_rules is None: return True - check_func = getattr(self.third_party_rules, "check_visibility_can_be_modified") + check_func = getattr( + self.third_party_rules, "check_visibility_can_be_modified", None + ) if not check_func or not isinstance(check_func, Callable): return True diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/test_third_party_rules.py similarity index 96% rename from tests/rest/client/third_party_rules.py rename to tests/rest/client/test_third_party_rules.py index 715e87de08..7b322f526c 100644 --- a/tests/rest/client/third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -49,7 +49,7 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() config["third_party_event_rules"] = { - "module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule", + "module": __name__ + ".ThirdPartyRulesTestModule", "config": {}, } From 785437dc0ddfb23012748895e1f8665338e4f3df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 5 Oct 2020 21:40:51 +0100 Subject: [PATCH 114/134] Update default room version to 6 (#8461) Per https://github.com/matrix-org/matrix-doc/pull/2788 --- changelog.d/8461.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/rest/client/v1/test_directory.py | 11 ++++++++++- 4 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8461.feature diff --git a/changelog.d/8461.feature b/changelog.d/8461.feature new file mode 100644 index 0000000000..3665d670e1 --- /dev/null +++ b/changelog.d/8461.feature @@ -0,0 +1 @@ +Change default room version to "6", per [MSC2788](https://github.com/matrix-org/matrix-doc/pull/2788). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7126ade2de..bb64662e28 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -119,7 +119,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "5" +#default_room_version: "6" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index ef6d70e3f8..85aa49c02d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -39,7 +39,7 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] -DEFAULT_ROOM_VERSION = "5" +DEFAULT_ROOM_VERSION = "6" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. 
" diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 633b7dbda0..ea5a7f3739 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -21,6 +21,7 @@ from synapse.types import RoomAlias from synapse.util.stringutils import random_string from tests import unittest +from tests.unittest import override_config class DirectoryTestCase(unittest.HomeserverTestCase): @@ -67,10 +68,18 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.ensure_user_joined_room() self.set_alias_via_directory(400, alias_length=256) - def test_state_event_in_room(self): + @override_config({"default_room_version": 5}) + def test_state_event_user_in_v5_room(self): + """Test that a regular user can add alias events before room v6""" self.ensure_user_joined_room() self.set_alias_via_state_event(200) + @override_config({"default_room_version": 6}) + def test_state_event_v6_room(self): + """Test that a regular user can *not* add alias events from room v6""" + self.ensure_user_joined_room() + self.set_alias_via_state_event(403) + def test_directory_in_room(self): self.ensure_user_joined_room() self.set_alias_via_directory(200) From 3e58ce72b42f2ae473c1e76a967548cd6fa7e2e6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Oct 2020 10:03:39 +0100 Subject: [PATCH 115/134] Don't bother responding to client requests that have already disconnected (#8465) This PR ports the quick fix from https://github.com/matrix-org/synapse/pull/2796 to further methods which handle media, URL preview and `/key/v2/server` requests. This prevents a harmless `ERROR` that comes up in the logs when we were unable to respond to a client request when the client had already disconnected. In this case we simply bail out if the client has already done so. This is the 'simple fix' as suggested by https://github.com/matrix-org/synapse/issues/5304#issuecomment-574740003. Fixes https://github.com/matrix-org/synapse/issues/6700 Fixes https://github.com/matrix-org/synapse/issues/5304 --- changelog.d/8465.bugfix | 1 + synapse/http/server.py | 5 +++++ synapse/rest/media/v1/_base.py | 6 ++++++ 3 files changed, 12 insertions(+) create mode 100644 changelog.d/8465.bugfix diff --git a/changelog.d/8465.bugfix b/changelog.d/8465.bugfix new file mode 100644 index 0000000000..73f895b268 --- /dev/null +++ b/changelog.d/8465.bugfix @@ -0,0 +1 @@ +Don't attempt to respond to some requests if the client has already disconnected. \ No newline at end of file diff --git a/synapse/http/server.py b/synapse/http/server.py index 09ed74f6ce..00b98af3d4 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -651,6 +651,11 @@ def respond_with_json_bytes( Returns: twisted.web.server.NOT_DONE_YET if the request is still active. """ + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return request.setResponseCode(code) request.setHeader(b"Content-Type", b"application/json") diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 6568e61829..67aa993f19 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -213,6 +213,12 @@ async def respond_with_responder( file_size (int|None): Size in bytes of the media. If not known it should be None upload_name (str|None): The name of the requested file, if any. 
""" + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return + if not responder: respond_404(request) return From a02446113012920c92264f632832308588649ed8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 6 Oct 2020 16:31:31 +0100 Subject: [PATCH 116/134] Additional tests for third-party event rules (#8468) * Optimise and test state fetching for 3p event rules Getting all the events at once is much more efficient than getting them individually * Test that 3p event rules can modify events --- changelog.d/8468.misc | 1 + synapse/events/third_party_rules.py | 12 +-- tests/rest/client/test_third_party_rules.py | 84 +++++++++++++++++---- 3 files changed, 79 insertions(+), 18 deletions(-) create mode 100644 changelog.d/8468.misc diff --git a/changelog.d/8468.misc b/changelog.d/8468.misc new file mode 100644 index 0000000000..32ba991e64 --- /dev/null +++ b/changelog.d/8468.misc @@ -0,0 +1 @@ +Additional testing for `ThirdPartyEventRules`. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 1ca77519d5..e38b8e67fb 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -61,12 +61,14 @@ class ThirdPartyEventRules: prev_state_ids = await context.get_prev_state_ids() # Retrieve the state events from the database. - state_events = {} - for key, event_id in prev_state_ids.items(): - state_events[key] = await self.store.get_event(event_id, allow_none=True) + events = await self.store.get_events(prev_state_ids.values()) + state_events = {(ev.type, ev.state_key): ev for ev in events.values()} - ret = await self.third_party_rules.check_event_allowed(event, state_events) - return ret + # The module can modify the event slightly if it wants, but caution should be + # exercised, and it's likely to go very wrong if applied to events received over + # federation. + + return await self.third_party_rules.check_event_allowed(event, state_events) async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 7b322f526c..c12518c931 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -12,33 +12,43 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import threading + +from mock import Mock + +from synapse.events import EventBase from synapse.rest import admin from synapse.rest.client.v1 import login, room -from synapse.types import Requester +from synapse.types import Requester, StateMap from tests import unittest +thread_local = threading.local() + class ThirdPartyRulesTestModule: - def __init__(self, config, *args, **kwargs): - pass + def __init__(self, config, module_api): + # keep a record of the "current" rules module, so that the test can patch + # it if desired. 
+ thread_local.rules_module = self async def on_create_room( self, requester: Requester, config: dict, is_requester_admin: bool ): return True - async def check_event_allowed(self, event, context): - if event.type == "foo.bar.forbidden": - return False - else: - return True + async def check_event_allowed(self, event: EventBase, state: StateMap[EventBase]): + return True @staticmethod def parse_config(config): return config +def current_rules_module() -> ThirdPartyRulesTestModule: + return thread_local.rules_module + + class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): servlets = [ admin.register_servlets, @@ -46,15 +56,13 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): - config = self.default_config() + def default_config(self): + config = super().default_config() config["third_party_event_rules"] = { "module": __name__ + ".ThirdPartyRulesTestModule", "config": {}, } - - self.hs = self.setup_test_homeserver(config=config) - return self.hs + return config def prepare(self, reactor, clock, homeserver): # Create a user and room to play with during the tests @@ -67,6 +75,14 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ + # patch the rules module with a Mock which will return False for some event + # types + async def check(ev, state): + return ev.type != "foo.bar.forbidden" + + callback = Mock(spec=[], side_effect=check) + current_rules_module().check_event_allowed = callback + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % self.room_id, @@ -76,6 +92,16 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + callback.assert_called_once() + + # there should be various state events in the state arg: do some basic checks + state_arg = callback.call_args[0][1] + for k in (("m.room.create", ""), ("m.room.member", self.user_id)): + self.assertIn(k, state_arg) + ev = state_arg[k] + self.assertEqual(ev.type, k[0]) + self.assertEqual(ev.state_key, k[1]) + request, channel = self.make_request( "PUT", "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % self.room_id, @@ -84,3 +110,35 @@ class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + + def test_modify_event(self): + """Tests that the module can successfully tweak an event before it is persisted. + """ + # first patch the event checker so that it will modify the event + async def check(ev: EventBase, state): + ev.content = {"x": "y"} + return True + + current_rules_module().check_event_allowed = check + + # now send the event + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/modifyme/1" % self.room_id, + {"x": "x"}, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + event_id = channel.json_body["event_id"] + + # ... 
and check that it got modified + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200", channel.result) + ev = channel.json_body + self.assertEqual(ev["content"]["x"], "y") From 3cd78bbe9e208d2e93ccebee5d3586ee5f5a5d31 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 6 Oct 2020 13:26:29 -0400 Subject: [PATCH 117/134] Add support for MSC2732: olm fallback keys (#8312) --- changelog.d/8312.feature | 1 + scripts/synapse_port_db | 1 + synapse/handlers/e2e_keys.py | 16 +++ synapse/handlers/sync.py | 8 ++ synapse/rest/client/v2_alpha/sync.py | 1 + .../storage/databases/main/end_to_end_keys.py | 100 +++++++++++++++++- .../main/schema/delta/58/11fallback.sql | 24 +++++ tests/handlers/test_e2e_keys.py | 65 ++++++++++++ 8 files changed, 215 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8312.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11fallback.sql diff --git a/changelog.d/8312.feature b/changelog.d/8312.feature new file mode 100644 index 0000000000..222a1b032a --- /dev/null +++ b/changelog.d/8312.feature @@ -0,0 +1 @@ +Add support for olm fallback keys ([MSC2732](https://github.com/matrix-org/matrix-doc/pull/2732)). \ No newline at end of file diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7e12f5440c..2d0b59ab53 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -90,6 +90,7 @@ BOOLEAN_COLUMNS = { "room_stats_state": ["is_federatable"], "local_media_repository": ["safe_from_quarantine"], "users": ["shadow_banned"], + "e2e_fallback_keys_json": ["used"], } diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index dd40fd1299..611742ae72 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -496,6 +496,22 @@ class E2eKeysHandler: log_kv( {"message": "Did not update one_time_keys", "reason": "no keys given"} ) + fallback_keys = keys.get("org.matrix.msc2732.fallback_keys", None) + if fallback_keys and isinstance(fallback_keys, dict): + log_kv( + { + "message": "Updating fallback_keys for device.", + "user_id": user_id, + "device_id": device_id, + } + ) + await self.store.set_e2e_fallback_keys(user_id, device_id, fallback_keys) + elif fallback_keys: + log_kv({"message": "Did not update fallback_keys", "reason": "not a dict"}) + else: + log_kv( + {"message": "Did not update fallback_keys", "reason": "no keys given"} + ) # the device should have been registered already, but it may have been # deleted due to a race with a DELETE request. 
Or we may be using an diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a998e6b7f6..dd1f90e359 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -201,6 +201,8 @@ class SyncResult: device_lists: List of user_ids whose devices have changed device_one_time_keys_count: Dict of algorithm to count for one time keys for this device + device_unused_fallback_key_types: List of key types that have an unused fallback + key groups: Group updates, if any """ @@ -213,6 +215,7 @@ class SyncResult: to_device = attr.ib(type=List[JsonDict]) device_lists = attr.ib(type=DeviceLists) device_one_time_keys_count = attr.ib(type=JsonDict) + device_unused_fallback_key_types = attr.ib(type=List[str]) groups = attr.ib(type=Optional[GroupsSyncResult]) def __bool__(self) -> bool: @@ -1014,10 +1017,14 @@ class SyncHandler: logger.debug("Fetching OTK data") device_id = sync_config.device_id one_time_key_counts = {} # type: JsonDict + unused_fallback_key_types = [] # type: List[str] if device_id: one_time_key_counts = await self.store.count_e2e_one_time_keys( user_id, device_id ) + unused_fallback_key_types = await self.store.get_e2e_unused_fallback_key_types( + user_id, device_id + ) logger.debug("Fetching group data") await self._generate_sync_entry_for_groups(sync_result_builder) @@ -1041,6 +1048,7 @@ class SyncHandler: device_lists=device_lists, groups=sync_result_builder.groups, device_one_time_keys_count=one_time_key_counts, + device_unused_fallback_key_types=unused_fallback_key_types, next_batch=sync_result_builder.now_token, ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 6779df952f..2b84eb89c0 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -236,6 +236,7 @@ class SyncRestServlet(RestServlet): "leave": sync_result.groups.leave, }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, "next_batch": await sync_result.next_batch.to_string(self.store), } diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 22e1ed15d0..8c97f2af5c 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -367,6 +367,57 @@ class EndToEndKeyWorkerStore(SQLBaseStore): "count_e2e_one_time_keys", _count_e2e_one_time_keys ) + async def set_e2e_fallback_keys( + self, user_id: str, device_id: str, fallback_keys: JsonDict + ) -> None: + """Set the user's e2e fallback keys. + + Args: + user_id: the user whose keys are being set + device_id: the device whose keys are being set + fallback_keys: the keys to set. This is a map from key ID (which is + of the form "algorithm:id") to key data. 
+ """ + # fallback_keys will usually only have one item in it, so using a for + # loop (as opposed to calling simple_upsert_many_txn) won't be too bad + # FIXME: make sure that only one key per algorithm is uploaded + for key_id, fallback_key in fallback_keys.items(): + algorithm, key_id = key_id.split(":", 1) + await self.db_pool.simple_upsert( + "e2e_fallback_keys_json", + keyvalues={ + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + }, + values={ + "key_id": key_id, + "key_json": json_encoder.encode(fallback_key), + "used": False, + }, + desc="set_e2e_fallback_key", + ) + + @cached(max_entries=10000) + async def get_e2e_unused_fallback_key_types( + self, user_id: str, device_id: str + ) -> List[str]: + """Returns the fallback key types that have an unused key. + + Args: + user_id: the user whose keys are being queried + device_id: the device whose keys are being queried + + Returns: + a list of key types + """ + return await self.db_pool.simple_select_onecol( + "e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id, "used": False}, + retcol="algorithm", + desc="get_e2e_unused_fallback_key_types", + ) + async def get_e2e_cross_signing_key( self, user_id: str, key_type: str, from_user_id: Optional[str] = None ) -> Optional[dict]: @@ -701,15 +752,37 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): " WHERE user_id = ? AND device_id = ? AND algorithm = ?" " LIMIT 1" ) + fallback_sql = ( + "SELECT key_id, key_json, used FROM e2e_fallback_keys_json" + " WHERE user_id = ? AND device_id = ? AND algorithm = ?" + " LIMIT 1" + ) result = {} delete = [] + used_fallbacks = [] for user_id, device_id, algorithm in query_list: user_result = result.setdefault(user_id, {}) device_result = user_result.setdefault(device_id, {}) txn.execute(sql, (user_id, device_id, algorithm)) - for key_id, key_json in txn: + otk_row = txn.fetchone() + if otk_row is not None: + key_id, key_json = otk_row device_result[algorithm + ":" + key_id] = key_json delete.append((user_id, device_id, algorithm, key_id)) + else: + # no one-time key available, so see if there's a fallback + # key + txn.execute(fallback_sql, (user_id, device_id, algorithm)) + fallback_row = txn.fetchone() + if fallback_row is not None: + key_id, key_json, used = fallback_row + device_result[algorithm + ":" + key_id] = key_json + if not used: + used_fallbacks.append( + (user_id, device_id, algorithm, key_id) + ) + + # drop any one-time keys that were claimed sql = ( "DELETE FROM e2e_one_time_keys_json" " WHERE user_id = ? AND device_id = ? AND algorithm = ?" 
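[Editor's note: the hunk above and the hunk that follows split the fallback-key claim logic across SQL strings, bookkeeping lists, and cache invalidation. As a minimal, self-contained sketch of the algorithm they implement, using plain in-memory dicts in place of the e2e_one_time_keys_json and e2e_fallback_keys_json tables (the names and container shapes here are illustrative, not Synapse's storage API):

    from typing import Dict, List, Optional, Tuple

    # (user_id, device_id, algorithm) -> list of (key_id, key_json)
    OneTimeKeys = Dict[Tuple[str, str, str], List[Tuple[str, str]]]
    # (user_id, device_id, algorithm) -> (key_id, key_json, used)
    FallbackKeys = Dict[Tuple[str, str, str], Tuple[str, str, bool]]

    def claim_key(
        otks: OneTimeKeys, fallbacks: FallbackKeys, user: str, device: str, alg: str
    ) -> Optional[Tuple[str, str]]:
        bucket = otks.get((user, device, alg))
        if bucket:
            # a one-time key is available: hand it out and destroy it,
            # as the DELETE above does for claimed one-time keys
            return bucket.pop(0)
        row = fallbacks.get((user, device, alg))
        if row is not None:
            key_id, key_json, used = row
            if not used:
                # first claim: flag the fallback as used, so /sync can tell
                # the device to upload a replacement (the UPDATE in the next
                # hunk), but do not delete it
                fallbacks[(user, device, alg)] = (key_id, key_json, True)
            # unlike a one-time key, a fallback key is returned on every claim
            return (key_id, key_json)
        return None

    otks = {("@u:hs", "DEV", "alg1"): [("k2", "otk")]}
    fallbacks = {("@u:hs", "DEV", "alg1"): ("k1", "fb", False)}
    assert claim_key(otks, fallbacks, "@u:hs", "DEV", "alg1") == ("k2", "otk")
    # one-time keys exhausted: the fallback is served now and on later claims
    assert claim_key(otks, fallbacks, "@u:hs", "DEV", "alg1") == ("k1", "fb")
    assert claim_key(otks, fallbacks, "@u:hs", "DEV", "alg1") == ("k1", "fb")

The essential asymmetry, which the test added at the end of this patch exercises, is that a one-time key is destroyed by the claim, while a fallback key survives it and is only marked used on the first claim.]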
@@ -726,6 +799,23 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + # mark fallback keys as used + for user_id, device_id, algorithm, key_id in used_fallbacks: + self.db_pool.simple_update_txn( + txn, + "e2e_fallback_keys_json", + { + "user_id": user_id, + "device_id": device_id, + "algorithm": algorithm, + "key_id": key_id, + }, + {"used": True}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) + return result return await self.db_pool.runInteraction( @@ -754,6 +844,14 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="e2e_fallback_keys_json", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) + self._invalidate_cache_and_stream( + txn, self.get_e2e_unused_fallback_key_types, (user_id, device_id) + ) await self.db_pool.runInteraction( "delete_e2e_keys_by_device", delete_e2e_keys_by_device_txn diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql b/synapse/storage/databases/main/schema/delta/58/11fallback.sql new file mode 100644 index 0000000000..4ed981dbf8 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11fallback.sql @@ -0,0 +1,24 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS e2e_fallback_keys_json ( + user_id TEXT NOT NULL, -- The user this fallback key is for. + device_id TEXT NOT NULL, -- The device this fallback key is for. + algorithm TEXT NOT NULL, -- Which algorithm this fallback key is for. + key_id TEXT NOT NULL, -- An id for suppressing duplicate uploads. + key_json TEXT NOT NULL, -- The key as a JSON blob. + used BOOLEAN NOT NULL DEFAULT FALSE, -- Whether the key has been used or not. 
+ CONSTRAINT e2e_fallback_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm) +); diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 366dcfb670..4e9e3dcbc2 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -171,6 +171,71 @@ class E2eKeysHandlerTestCase(unittest.TestCase): }, ) + @defer.inlineCallbacks + def test_fallback_key(self): + local_user = "@boris:" + self.hs.hostname + device_id = "xyz" + fallback_key = {"alg1:k1": "key1"} + otk = {"alg1:k2": "key2"} + + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, + device_id, + {"org.matrix.msc2732.fallback_keys": fallback_key}, + ) + ) + + # claiming an OTK when no OTKs are available should return the fallback + # key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # claiming an OTK again should return the same fallback key + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + + # if the user uploads a one-time key, the next claim should fetch the + # one-time key, and then go back to the fallback + yield defer.ensureDeferred( + self.handler.upload_keys_for_user( + local_user, device_id, {"one_time_keys": otk} + ) + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, {"failures": {}, "one_time_keys": {local_user: {device_id: otk}}}, + ) + + res = yield defer.ensureDeferred( + self.handler.claim_one_time_keys( + {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None + ) + ) + self.assertEqual( + res, + {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, + ) + @defer.inlineCallbacks def test_replace_master_key(self): """uploading a new signing key should make the old signing key unavailable""" From 903fcd2d3561813b80706b07e1dcc19eb47ec260 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 7 Oct 2020 11:28:05 +0100 Subject: [PATCH 118/134] update wording --- synapse/handlers/message.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6d136930bf..3e9a22e8f3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -835,8 +835,9 @@ class EventCreationHandler: ShadowBanError if the requester has been shadow-banned. """ - # we don't apply shadow-banning to membership events, so that the user - # can come and go as they want. + # we don't apply shadow-banning to membership events here. Invites are blocked + # higher up the stack, and we allow shadow-banned users to send join and leave + # events as normal. 
if ( event.type != EventTypes.Member and not ignore_shadow_ban From 01f82bfe32c52e0020e63d544a8f59e5e97aab52 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 7 Oct 2020 11:45:31 +0100 Subject: [PATCH 119/134] Remove docs/sphinx and related references (#8480) https://github.com/matrix-org/synapse/tree/develop/docs/sphinx doesn't seem to really be utilised or changed recently since the initial commit. I like the idea of exportable documentation of the codebase, but at the moment after running through the build instructions the generated website wasn't very useful... --- README.rst | 13 - changelog.d/8480.misc | 1 + docs/code_style.md | 2 - docs/sphinx/README.rst | 1 - docs/sphinx/conf.py | 271 ------------------ docs/sphinx/index.rst | 20 -- docs/sphinx/modules.rst | 7 - docs/sphinx/synapse.api.auth.rst | 7 - docs/sphinx/synapse.api.constants.rst | 7 - docs/sphinx/synapse.api.dbobjects.rst | 7 - docs/sphinx/synapse.api.errors.rst | 7 - docs/sphinx/synapse.api.event_stream.rst | 7 - docs/sphinx/synapse.api.events.factory.rst | 7 - docs/sphinx/synapse.api.events.room.rst | 7 - docs/sphinx/synapse.api.events.rst | 18 -- docs/sphinx/synapse.api.handlers.events.rst | 7 - docs/sphinx/synapse.api.handlers.factory.rst | 7 - .../synapse.api.handlers.federation.rst | 7 - docs/sphinx/synapse.api.handlers.register.rst | 7 - docs/sphinx/synapse.api.handlers.room.rst | 7 - docs/sphinx/synapse.api.handlers.rst | 21 -- docs/sphinx/synapse.api.notifier.rst | 7 - docs/sphinx/synapse.api.register_events.rst | 7 - docs/sphinx/synapse.api.room_events.rst | 7 - docs/sphinx/synapse.api.rst | 30 -- docs/sphinx/synapse.api.server.rst | 7 - docs/sphinx/synapse.api.storage.rst | 7 - docs/sphinx/synapse.api.stream.rst | 7 - docs/sphinx/synapse.api.streams.event.rst | 7 - docs/sphinx/synapse.api.streams.rst | 17 -- docs/sphinx/synapse.app.homeserver.rst | 7 - docs/sphinx/synapse.app.rst | 17 -- docs/sphinx/synapse.db.rst | 10 - docs/sphinx/synapse.federation.handler.rst | 7 - docs/sphinx/synapse.federation.messaging.rst | 7 - docs/sphinx/synapse.federation.pdu_codec.rst | 7 - .../sphinx/synapse.federation.persistence.rst | 7 - .../sphinx/synapse.federation.replication.rst | 7 - docs/sphinx/synapse.federation.rst | 22 -- docs/sphinx/synapse.federation.transport.rst | 7 - docs/sphinx/synapse.federation.units.rst | 7 - docs/sphinx/synapse.persistence.rst | 19 -- docs/sphinx/synapse.persistence.service.rst | 7 - docs/sphinx/synapse.persistence.tables.rst | 7 - .../synapse.persistence.transactions.rst | 7 - docs/sphinx/synapse.rest.base.rst | 7 - docs/sphinx/synapse.rest.events.rst | 7 - docs/sphinx/synapse.rest.register.rst | 7 - docs/sphinx/synapse.rest.room.rst | 7 - docs/sphinx/synapse.rest.rst | 20 -- docs/sphinx/synapse.rst | 30 -- docs/sphinx/synapse.server.rst | 7 - docs/sphinx/synapse.state.rst | 7 - docs/sphinx/synapse.util.async.rst | 7 - docs/sphinx/synapse.util.dbutils.rst | 7 - docs/sphinx/synapse.util.http.rst | 7 - docs/sphinx/synapse.util.lockutils.rst | 7 - docs/sphinx/synapse.util.logutils.rst | 7 - docs/sphinx/synapse.util.rst | 21 -- docs/sphinx/synapse.util.stringutils.rst | 7 - scripts-dev/sphinx_api_docs.sh | 1 - setup.cfg | 5 - 62 files changed, 1 insertion(+), 839 deletions(-) create mode 100644 changelog.d/8480.misc delete mode 100644 docs/sphinx/README.rst delete mode 100644 docs/sphinx/conf.py delete mode 100644 docs/sphinx/index.rst delete mode 100644 docs/sphinx/modules.rst delete mode 100644 docs/sphinx/synapse.api.auth.rst delete mode 
100644 docs/sphinx/synapse.api.constants.rst delete mode 100644 docs/sphinx/synapse.api.dbobjects.rst delete mode 100644 docs/sphinx/synapse.api.errors.rst delete mode 100644 docs/sphinx/synapse.api.event_stream.rst delete mode 100644 docs/sphinx/synapse.api.events.factory.rst delete mode 100644 docs/sphinx/synapse.api.events.room.rst delete mode 100644 docs/sphinx/synapse.api.events.rst delete mode 100644 docs/sphinx/synapse.api.handlers.events.rst delete mode 100644 docs/sphinx/synapse.api.handlers.factory.rst delete mode 100644 docs/sphinx/synapse.api.handlers.federation.rst delete mode 100644 docs/sphinx/synapse.api.handlers.register.rst delete mode 100644 docs/sphinx/synapse.api.handlers.room.rst delete mode 100644 docs/sphinx/synapse.api.handlers.rst delete mode 100644 docs/sphinx/synapse.api.notifier.rst delete mode 100644 docs/sphinx/synapse.api.register_events.rst delete mode 100644 docs/sphinx/synapse.api.room_events.rst delete mode 100644 docs/sphinx/synapse.api.rst delete mode 100644 docs/sphinx/synapse.api.server.rst delete mode 100644 docs/sphinx/synapse.api.storage.rst delete mode 100644 docs/sphinx/synapse.api.stream.rst delete mode 100644 docs/sphinx/synapse.api.streams.event.rst delete mode 100644 docs/sphinx/synapse.api.streams.rst delete mode 100644 docs/sphinx/synapse.app.homeserver.rst delete mode 100644 docs/sphinx/synapse.app.rst delete mode 100644 docs/sphinx/synapse.db.rst delete mode 100644 docs/sphinx/synapse.federation.handler.rst delete mode 100644 docs/sphinx/synapse.federation.messaging.rst delete mode 100644 docs/sphinx/synapse.federation.pdu_codec.rst delete mode 100644 docs/sphinx/synapse.federation.persistence.rst delete mode 100644 docs/sphinx/synapse.federation.replication.rst delete mode 100644 docs/sphinx/synapse.federation.rst delete mode 100644 docs/sphinx/synapse.federation.transport.rst delete mode 100644 docs/sphinx/synapse.federation.units.rst delete mode 100644 docs/sphinx/synapse.persistence.rst delete mode 100644 docs/sphinx/synapse.persistence.service.rst delete mode 100644 docs/sphinx/synapse.persistence.tables.rst delete mode 100644 docs/sphinx/synapse.persistence.transactions.rst delete mode 100644 docs/sphinx/synapse.rest.base.rst delete mode 100644 docs/sphinx/synapse.rest.events.rst delete mode 100644 docs/sphinx/synapse.rest.register.rst delete mode 100644 docs/sphinx/synapse.rest.room.rst delete mode 100644 docs/sphinx/synapse.rest.rst delete mode 100644 docs/sphinx/synapse.rst delete mode 100644 docs/sphinx/synapse.server.rst delete mode 100644 docs/sphinx/synapse.state.rst delete mode 100644 docs/sphinx/synapse.util.async.rst delete mode 100644 docs/sphinx/synapse.util.dbutils.rst delete mode 100644 docs/sphinx/synapse.util.http.rst delete mode 100644 docs/sphinx/synapse.util.lockutils.rst delete mode 100644 docs/sphinx/synapse.util.logutils.rst delete mode 100644 docs/sphinx/synapse.util.rst delete mode 100644 docs/sphinx/synapse.util.stringutils.rst delete mode 100644 scripts-dev/sphinx_api_docs.sh diff --git a/README.rst b/README.rst index 4a189c8bc4..e623cf863a 100644 --- a/README.rst +++ b/README.rst @@ -290,19 +290,6 @@ Testing with SyTest is recommended for verifying that changes related to the Client-Server API are functioning correctly. See the `installation instructions `_ for details. 
-Building Internal API Documentation -=================================== - -Before building internal API documentation install sphinx and -sphinxcontrib-napoleon:: - - pip install sphinx - pip install sphinxcontrib-napoleon - -Building internal API documentation:: - - python setup.py build_sphinx - Troubleshooting =============== diff --git a/changelog.d/8480.misc b/changelog.d/8480.misc new file mode 100644 index 0000000000..81633af296 --- /dev/null +++ b/changelog.d/8480.misc @@ -0,0 +1 @@ +Remove outdated sphinx documentation, scripts and configuration. \ No newline at end of file diff --git a/docs/code_style.md b/docs/code_style.md index 6ef6f80290..f6c825d7d4 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -64,8 +64,6 @@ save as it takes a while and is very resource intensive. - Use underscores for functions and variables. - **Docstrings**: should follow the [google code style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings). - This is so that we can generate documentation with - [sphinx](http://sphinxcontrib-napoleon.readthedocs.org/en/latest/). See the [examples](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) in the sphinx documentation. diff --git a/docs/sphinx/README.rst b/docs/sphinx/README.rst deleted file mode 100644 index a7ab7c5500..0000000000 --- a/docs/sphinx/README.rst +++ /dev/null @@ -1 +0,0 @@ -TODO: how (if at all) is this actually maintained? diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py deleted file mode 100644 index ca4b879526..0000000000 --- a/docs/sphinx/conf.py +++ /dev/null @@ -1,271 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Synapse documentation build configuration file, created by -# sphinx-quickstart on Tue Jun 10 17:31:02 2014. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.ifconfig", - "sphinxcontrib.napoleon", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix of source filenames. -source_suffix = ".rst" - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = "Synapse" -copyright = ( - "Copyright 2014-2017 OpenMarket Ltd, 2017 Vector Creations Ltd, 2017 New Vector Ltd" -) - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. 
-version = "1.0" -# The full version, including alpha/beta/rc tags. -release = "1.0" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "default" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. 
-# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = "Synapsedoc" - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - #'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [("index", "Synapse.tex", "Synapse Documentation", "TNG", "manual")] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [("index", "synapse", "Synapse Documentation", ["TNG"], 1)] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - "index", - "Synapse", - "Synapse Documentation", - "TNG", - "Synapse", - "One line description of project.", - "Miscellaneous", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {"http://docs.python.org/": None} - -napoleon_include_special_with_doc = True -napoleon_use_ivar = True diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst deleted file mode 100644 index 76a4c0c7bf..0000000000 --- a/docs/sphinx/index.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. 
Synapse documentation master file, created by - sphinx-quickstart on Tue Jun 10 17:31:02 2014. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Synapse's documentation! -=================================== - -Contents: - -.. toctree:: - synapse - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/docs/sphinx/modules.rst b/docs/sphinx/modules.rst deleted file mode 100644 index 1c7f70bd13..0000000000 --- a/docs/sphinx/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse -======= - -.. toctree:: - :maxdepth: 4 - - synapse diff --git a/docs/sphinx/synapse.api.auth.rst b/docs/sphinx/synapse.api.auth.rst deleted file mode 100644 index 931eb59836..0000000000 --- a/docs/sphinx/synapse.api.auth.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.auth module -======================= - -.. automodule:: synapse.api.auth - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.constants.rst b/docs/sphinx/synapse.api.constants.rst deleted file mode 100644 index a1e3c47f68..0000000000 --- a/docs/sphinx/synapse.api.constants.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.constants module -============================ - -.. automodule:: synapse.api.constants - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.dbobjects.rst b/docs/sphinx/synapse.api.dbobjects.rst deleted file mode 100644 index e9d31167e0..0000000000 --- a/docs/sphinx/synapse.api.dbobjects.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.dbobjects module -============================ - -.. automodule:: synapse.api.dbobjects - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.errors.rst b/docs/sphinx/synapse.api.errors.rst deleted file mode 100644 index f1c6881478..0000000000 --- a/docs/sphinx/synapse.api.errors.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.errors module -========================= - -.. automodule:: synapse.api.errors - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.event_stream.rst b/docs/sphinx/synapse.api.event_stream.rst deleted file mode 100644 index 9291cb2dbc..0000000000 --- a/docs/sphinx/synapse.api.event_stream.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.event_stream module -=============================== - -.. automodule:: synapse.api.event_stream - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.factory.rst b/docs/sphinx/synapse.api.events.factory.rst deleted file mode 100644 index 2e71ff6070..0000000000 --- a/docs/sphinx/synapse.api.events.factory.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.events.factory module -================================= - -.. automodule:: synapse.api.events.factory - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.room.rst b/docs/sphinx/synapse.api.events.room.rst deleted file mode 100644 index 6cd5998599..0000000000 --- a/docs/sphinx/synapse.api.events.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.events.room module -============================== - -.. 
automodule:: synapse.api.events.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.events.rst b/docs/sphinx/synapse.api.events.rst deleted file mode 100644 index b762da55ee..0000000000 --- a/docs/sphinx/synapse.api.events.rst +++ /dev/null @@ -1,18 +0,0 @@ -synapse.api.events package -========================== - -Submodules ----------- - -.. toctree:: - - synapse.api.events.factory - synapse.api.events.room - -Module contents ---------------- - -.. automodule:: synapse.api.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.events.rst b/docs/sphinx/synapse.api.handlers.events.rst deleted file mode 100644 index d2e1b54ac0..0000000000 --- a/docs/sphinx/synapse.api.handlers.events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.events module -================================== - -.. automodule:: synapse.api.handlers.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.factory.rst b/docs/sphinx/synapse.api.handlers.factory.rst deleted file mode 100644 index b04a93f740..0000000000 --- a/docs/sphinx/synapse.api.handlers.factory.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.factory module -=================================== - -.. automodule:: synapse.api.handlers.factory - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.federation.rst b/docs/sphinx/synapse.api.handlers.federation.rst deleted file mode 100644 index 61a6542210..0000000000 --- a/docs/sphinx/synapse.api.handlers.federation.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.federation module -====================================== - -.. automodule:: synapse.api.handlers.federation - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.register.rst b/docs/sphinx/synapse.api.handlers.register.rst deleted file mode 100644 index 388f144eca..0000000000 --- a/docs/sphinx/synapse.api.handlers.register.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.register module -==================================== - -.. automodule:: synapse.api.handlers.register - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.room.rst b/docs/sphinx/synapse.api.handlers.room.rst deleted file mode 100644 index 8ca156c7ff..0000000000 --- a/docs/sphinx/synapse.api.handlers.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.handlers.room module -================================ - -.. automodule:: synapse.api.handlers.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.handlers.rst b/docs/sphinx/synapse.api.handlers.rst deleted file mode 100644 index e84f563fcb..0000000000 --- a/docs/sphinx/synapse.api.handlers.rst +++ /dev/null @@ -1,21 +0,0 @@ -synapse.api.handlers package -============================ - -Submodules ----------- - -.. toctree:: - - synapse.api.handlers.events - synapse.api.handlers.factory - synapse.api.handlers.federation - synapse.api.handlers.register - synapse.api.handlers.room - -Module contents ---------------- - -.. automodule:: synapse.api.handlers - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.notifier.rst b/docs/sphinx/synapse.api.notifier.rst deleted file mode 100644 index 631b42a497..0000000000 --- a/docs/sphinx/synapse.api.notifier.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.notifier module -=========================== - -.. 
automodule:: synapse.api.notifier - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.register_events.rst b/docs/sphinx/synapse.api.register_events.rst deleted file mode 100644 index 79ad4ce211..0000000000 --- a/docs/sphinx/synapse.api.register_events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.register_events module -================================== - -.. automodule:: synapse.api.register_events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.room_events.rst b/docs/sphinx/synapse.api.room_events.rst deleted file mode 100644 index bead1711f5..0000000000 --- a/docs/sphinx/synapse.api.room_events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.room_events module -============================== - -.. automodule:: synapse.api.room_events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.rst b/docs/sphinx/synapse.api.rst deleted file mode 100644 index f4d39ff331..0000000000 --- a/docs/sphinx/synapse.api.rst +++ /dev/null @@ -1,30 +0,0 @@ -synapse.api package -=================== - -Subpackages ------------ - -.. toctree:: - - synapse.api.events - synapse.api.handlers - synapse.api.streams - -Submodules ----------- - -.. toctree:: - - synapse.api.auth - synapse.api.constants - synapse.api.errors - synapse.api.notifier - synapse.api.storage - -Module contents ---------------- - -.. automodule:: synapse.api - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.server.rst b/docs/sphinx/synapse.api.server.rst deleted file mode 100644 index b01600235e..0000000000 --- a/docs/sphinx/synapse.api.server.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.server module -========================= - -.. automodule:: synapse.api.server - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.storage.rst b/docs/sphinx/synapse.api.storage.rst deleted file mode 100644 index afa40685c4..0000000000 --- a/docs/sphinx/synapse.api.storage.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.storage module -========================== - -.. automodule:: synapse.api.storage - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.stream.rst b/docs/sphinx/synapse.api.stream.rst deleted file mode 100644 index 0d5e3f01bf..0000000000 --- a/docs/sphinx/synapse.api.stream.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.stream module -========================= - -.. automodule:: synapse.api.stream - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.streams.event.rst b/docs/sphinx/synapse.api.streams.event.rst deleted file mode 100644 index 2ac45a35c8..0000000000 --- a/docs/sphinx/synapse.api.streams.event.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.api.streams.event module -================================ - -.. automodule:: synapse.api.streams.event - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.api.streams.rst b/docs/sphinx/synapse.api.streams.rst deleted file mode 100644 index 72eb205caf..0000000000 --- a/docs/sphinx/synapse.api.streams.rst +++ /dev/null @@ -1,17 +0,0 @@ -synapse.api.streams package -=========================== - -Submodules ----------- - -.. toctree:: - - synapse.api.streams.event - -Module contents ---------------- - -.. 
automodule:: synapse.api.streams - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.app.homeserver.rst b/docs/sphinx/synapse.app.homeserver.rst deleted file mode 100644 index 54b93da8fe..0000000000 --- a/docs/sphinx/synapse.app.homeserver.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.app.homeserver module -============================= - -.. automodule:: synapse.app.homeserver - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.app.rst b/docs/sphinx/synapse.app.rst deleted file mode 100644 index 4535b79827..0000000000 --- a/docs/sphinx/synapse.app.rst +++ /dev/null @@ -1,17 +0,0 @@ -synapse.app package -=================== - -Submodules ----------- - -.. toctree:: - - synapse.app.homeserver - -Module contents ---------------- - -.. automodule:: synapse.app - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.db.rst b/docs/sphinx/synapse.db.rst deleted file mode 100644 index 83df6c03db..0000000000 --- a/docs/sphinx/synapse.db.rst +++ /dev/null @@ -1,10 +0,0 @@ -synapse.db package -================== - -Module contents ---------------- - -.. automodule:: synapse.db - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.handler.rst b/docs/sphinx/synapse.federation.handler.rst deleted file mode 100644 index 5597f5c46d..0000000000 --- a/docs/sphinx/synapse.federation.handler.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.handler module -================================= - -.. automodule:: synapse.federation.handler - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.messaging.rst b/docs/sphinx/synapse.federation.messaging.rst deleted file mode 100644 index 4bbaabf3ef..0000000000 --- a/docs/sphinx/synapse.federation.messaging.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.messaging module -=================================== - -.. automodule:: synapse.federation.messaging - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.pdu_codec.rst b/docs/sphinx/synapse.federation.pdu_codec.rst deleted file mode 100644 index 8f0b15a63c..0000000000 --- a/docs/sphinx/synapse.federation.pdu_codec.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.pdu_codec module -=================================== - -.. automodule:: synapse.federation.pdu_codec - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.persistence.rst b/docs/sphinx/synapse.federation.persistence.rst deleted file mode 100644 index db7ab8ade1..0000000000 --- a/docs/sphinx/synapse.federation.persistence.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.persistence module -===================================== - -.. automodule:: synapse.federation.persistence - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.replication.rst b/docs/sphinx/synapse.federation.replication.rst deleted file mode 100644 index 49e26e0928..0000000000 --- a/docs/sphinx/synapse.federation.replication.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.replication module -===================================== - -.. 
automodule:: synapse.federation.replication - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.rst b/docs/sphinx/synapse.federation.rst deleted file mode 100644 index 7240c7901b..0000000000 --- a/docs/sphinx/synapse.federation.rst +++ /dev/null @@ -1,22 +0,0 @@ -synapse.federation package -========================== - -Submodules ----------- - -.. toctree:: - - synapse.federation.handler - synapse.federation.pdu_codec - synapse.federation.persistence - synapse.federation.replication - synapse.federation.transport - synapse.federation.units - -Module contents ---------------- - -.. automodule:: synapse.federation - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.transport.rst b/docs/sphinx/synapse.federation.transport.rst deleted file mode 100644 index 877956b3c9..0000000000 --- a/docs/sphinx/synapse.federation.transport.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.transport module -=================================== - -.. automodule:: synapse.federation.transport - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.federation.units.rst b/docs/sphinx/synapse.federation.units.rst deleted file mode 100644 index 8f9212b07d..0000000000 --- a/docs/sphinx/synapse.federation.units.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.federation.units module -=============================== - -.. automodule:: synapse.federation.units - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.rst b/docs/sphinx/synapse.persistence.rst deleted file mode 100644 index 37c0c23720..0000000000 --- a/docs/sphinx/synapse.persistence.rst +++ /dev/null @@ -1,19 +0,0 @@ -synapse.persistence package -=========================== - -Submodules ----------- - -.. toctree:: - - synapse.persistence.service - synapse.persistence.tables - synapse.persistence.transactions - -Module contents ---------------- - -.. automodule:: synapse.persistence - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.service.rst b/docs/sphinx/synapse.persistence.service.rst deleted file mode 100644 index 3514d3c76f..0000000000 --- a/docs/sphinx/synapse.persistence.service.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.service module -================================== - -.. automodule:: synapse.persistence.service - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.tables.rst b/docs/sphinx/synapse.persistence.tables.rst deleted file mode 100644 index 907b02769d..0000000000 --- a/docs/sphinx/synapse.persistence.tables.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.tables module -================================= - -.. automodule:: synapse.persistence.tables - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.persistence.transactions.rst b/docs/sphinx/synapse.persistence.transactions.rst deleted file mode 100644 index 475c02a8c5..0000000000 --- a/docs/sphinx/synapse.persistence.transactions.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.persistence.transactions module -======================================= - -.. 
automodule:: synapse.persistence.transactions - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.base.rst b/docs/sphinx/synapse.rest.base.rst deleted file mode 100644 index 84d2d9b31d..0000000000 --- a/docs/sphinx/synapse.rest.base.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.base module -======================== - -.. automodule:: synapse.rest.base - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.events.rst b/docs/sphinx/synapse.rest.events.rst deleted file mode 100644 index ebbe26c746..0000000000 --- a/docs/sphinx/synapse.rest.events.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.events module -========================== - -.. automodule:: synapse.rest.events - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.register.rst b/docs/sphinx/synapse.rest.register.rst deleted file mode 100644 index a4a48a8a8f..0000000000 --- a/docs/sphinx/synapse.rest.register.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.register module -============================ - -.. automodule:: synapse.rest.register - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.room.rst b/docs/sphinx/synapse.rest.room.rst deleted file mode 100644 index 63fc5c2840..0000000000 --- a/docs/sphinx/synapse.rest.room.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.rest.room module -======================== - -.. automodule:: synapse.rest.room - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rest.rst b/docs/sphinx/synapse.rest.rst deleted file mode 100644 index 016af926b2..0000000000 --- a/docs/sphinx/synapse.rest.rst +++ /dev/null @@ -1,20 +0,0 @@ -synapse.rest package -==================== - -Submodules ----------- - -.. toctree:: - - synapse.rest.base - synapse.rest.events - synapse.rest.register - synapse.rest.room - -Module contents ---------------- - -.. automodule:: synapse.rest - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.rst b/docs/sphinx/synapse.rst deleted file mode 100644 index e7869e0e5d..0000000000 --- a/docs/sphinx/synapse.rst +++ /dev/null @@ -1,30 +0,0 @@ -synapse package -=============== - -Subpackages ------------ - -.. toctree:: - - synapse.api - synapse.app - synapse.federation - synapse.persistence - synapse.rest - synapse.util - -Submodules ----------- - -.. toctree:: - - synapse.server - synapse.state - -Module contents ---------------- - -.. automodule:: synapse - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.server.rst b/docs/sphinx/synapse.server.rst deleted file mode 100644 index 7f33f084d7..0000000000 --- a/docs/sphinx/synapse.server.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.server module -===================== - -.. automodule:: synapse.server - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.state.rst b/docs/sphinx/synapse.state.rst deleted file mode 100644 index 744be2a8be..0000000000 --- a/docs/sphinx/synapse.state.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.state module -==================== - -.. automodule:: synapse.state - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.async.rst b/docs/sphinx/synapse.util.async.rst deleted file mode 100644 index 542bb54444..0000000000 --- a/docs/sphinx/synapse.util.async.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.async module -========================= - -.. 
automodule:: synapse.util.async - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.dbutils.rst b/docs/sphinx/synapse.util.dbutils.rst deleted file mode 100644 index afaa9eb749..0000000000 --- a/docs/sphinx/synapse.util.dbutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.dbutils module -=========================== - -.. automodule:: synapse.util.dbutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.http.rst b/docs/sphinx/synapse.util.http.rst deleted file mode 100644 index 344af5a490..0000000000 --- a/docs/sphinx/synapse.util.http.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.http module -======================== - -.. automodule:: synapse.util.http - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.lockutils.rst b/docs/sphinx/synapse.util.lockutils.rst deleted file mode 100644 index 16ee26cabd..0000000000 --- a/docs/sphinx/synapse.util.lockutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.lockutils module -============================= - -.. automodule:: synapse.util.lockutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.logutils.rst b/docs/sphinx/synapse.util.logutils.rst deleted file mode 100644 index 2b79fa7a4b..0000000000 --- a/docs/sphinx/synapse.util.logutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.logutils module -============================ - -.. automodule:: synapse.util.logutils - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.rst b/docs/sphinx/synapse.util.rst deleted file mode 100644 index 01a0c3a591..0000000000 --- a/docs/sphinx/synapse.util.rst +++ /dev/null @@ -1,21 +0,0 @@ -synapse.util package -==================== - -Submodules ----------- - -.. toctree:: - - synapse.util.async - synapse.util.http - synapse.util.lockutils - synapse.util.logutils - synapse.util.stringutils - -Module contents ---------------- - -.. automodule:: synapse.util - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/sphinx/synapse.util.stringutils.rst b/docs/sphinx/synapse.util.stringutils.rst deleted file mode 100644 index ec626eee28..0000000000 --- a/docs/sphinx/synapse.util.stringutils.rst +++ /dev/null @@ -1,7 +0,0 @@ -synapse.util.stringutils module -=============================== - -.. automodule:: synapse.util.stringutils - :members: - :undoc-members: - :show-inheritance: diff --git a/scripts-dev/sphinx_api_docs.sh b/scripts-dev/sphinx_api_docs.sh deleted file mode 100644 index ee72b29657..0000000000 --- a/scripts-dev/sphinx_api_docs.sh +++ /dev/null @@ -1 +0,0 @@ -sphinx-apidoc -o docs/sphinx/ synapse/ -ef diff --git a/setup.cfg b/setup.cfg index a32278ea8a..f46e43fad0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,8 +1,3 @@ -[build_sphinx] -source-dir = docs/sphinx -build-dir = docs/build -all_files = 1 - [trial] test_suite = tests From 4f0637346a194a3343b4fea6cf38c1548e56648d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 7 Oct 2020 12:03:26 +0100 Subject: [PATCH 120/134] Combine `SpamCheckerApi` with the more generic `ModuleApi`. (#8464) Lots of different module apis is not easy to maintain. Rather than adding yet another ModuleApi(hs, hs.get_auth_handler()) incantation, first add an hs.get_module_api() method and use it where possible. 
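[Editor's note: a minimal sketch of the accessor pattern this change introduces. cache_in_self below is a simplified stand-in for the real decorator in synapse/server.py, and both classes are trimmed to the shape of the change; none of this is the literal Synapse code.

    import functools

    def cache_in_self(builder):
        """Memoise a zero-argument builder method on its instance."""
        attr = "_cached_" + builder.__name__

        @functools.wraps(builder)
        def getter(self):
            value = getattr(self, attr, None)
            if value is None:
                value = builder(self)
                setattr(self, attr, value)
            return value

        return getter

    class ModuleApi:
        def __init__(self, hs, auth_handler):
            self._hs = hs
            self._auth_handler = auth_handler

    class HomeServer:
        def get_auth_handler(self):
            return object()  # stand-in for the real AuthHandler

        @cache_in_self
        def get_module_api(self):
            return ModuleApi(self, self.get_auth_handler())

    hs = HomeServer()
    assert hs.get_module_api() is hs.get_module_api()  # callers share one instance

The one caller that cannot use the shared accessor is AuthHandler itself, since reaching it through hs.get_module_api() would create an import loop; see the comment added in synapse/handlers/auth.py below.]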
--- changelog.d/8464.misc | 1 + docs/spam_checker.md | 9 ++---- synapse/app/homeserver.py | 3 +- synapse/events/spamcheck.py | 5 ++-- synapse/events/third_party_rules.py | 3 +- synapse/handlers/auth.py | 7 +++++ synapse/module_api/__init__.py | 29 ++++++++++++++++++- synapse/server.py | 5 ++++ synapse/spam_checker_api/__init__.py | 43 ---------------------------- tests/module_api/test_api.py | 4 +-- 10 files changed, 51 insertions(+), 58 deletions(-) create mode 100644 changelog.d/8464.misc diff --git a/changelog.d/8464.misc b/changelog.d/8464.misc new file mode 100644 index 0000000000..a552e88f9f --- /dev/null +++ b/changelog.d/8464.misc @@ -0,0 +1 @@ +Combine `SpamCheckerApi` with the more generic `ModuleApi`. diff --git a/docs/spam_checker.md b/docs/spam_checker.md index eb10e115f9..7fc08f1b70 100644 --- a/docs/spam_checker.md +++ b/docs/spam_checker.md @@ -11,7 +11,7 @@ able to be imported by the running Synapse. The Python class is instantiated with two objects: * Any configuration (see below). -* An instance of `synapse.spam_checker_api.SpamCheckerApi`. +* An instance of `synapse.module_api.ModuleApi`. It then implements methods which return a boolean to alter behavior in Synapse. @@ -26,11 +26,8 @@ well as some specific methods: The details of the each of these methods (as well as their inputs and outputs) are documented in the `synapse.events.spamcheck.SpamChecker` class. -The `SpamCheckerApi` class provides a way for the custom spam checker class to -call back into the homeserver internals. It currently implements the following -methods: - -* `get_state_events_in_room` +The `ModuleApi` class provides a way for the custom spam checker class to +call back into the homeserver internals. ### Example diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 4ed4a2c253..2b5465417f 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -56,7 +56,6 @@ from synapse.http.server import ( from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory @@ -106,7 +105,7 @@ class SynapseHomeServer(HomeServer): additional_resources = listener_config.http_options.additional_resources logger.debug("Configuring additional resources: %r", additional_resources) - module_api = ModuleApi(self, self.get_auth_handler()) + module_api = self.get_module_api() for path, resmodule in additional_resources.items(): handler_cls, config = load_module(resmodule) handler = handler_cls(config, module_api) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index b0fc859a47..bad18f7fdf 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -17,24 +17,25 @@ import inspect from typing import Any, Dict, List, Optional, Tuple -from synapse.spam_checker_api import RegistrationBehaviour, SpamCheckerApi +from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import Collection MYPY = False if MYPY: + import synapse.events import synapse.server class SpamChecker: def __init__(self, hs: "synapse.server.HomeServer"): self.spam_checkers = [] # type: List[Any] + api = hs.get_module_api() for module, config in hs.config.spam_checkers: # Older spam checkers don't accept the 
`api` argument, so we # try and detect support. spam_args = inspect.getfullargspec(module) if "api" in spam_args.args: - api = SpamCheckerApi(hs) self.spam_checkers.append(module(config=config, api=api)) else: self.spam_checkers.append(module(config=config)) diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index e38b8e67fb..1535cc5339 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -16,7 +16,6 @@ from typing import Callable from synapse.events import EventBase from synapse.events.snapshot import EventContext -from synapse.module_api import ModuleApi from synapse.types import Requester, StateMap @@ -40,7 +39,7 @@ class ThirdPartyEventRules: if module is not None: self.third_party_rules = module( - config=config, module_api=ModuleApi(hs, hs.get_auth_handler()), + config=config, module_api=hs.get_module_api(), ) async def check_event_allowed( diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7c4b716b28..f6d17c53b1 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -164,7 +164,14 @@ class AuthHandler(BaseHandler): self.bcrypt_rounds = hs.config.bcrypt_rounds + # we can't use hs.get_module_api() here, because to do so will create an + # import loop. + # + # TODO: refactor this class to separate the lower-level stuff that + # ModuleApi can use from the higher-level stuff that uses ModuleApi, as + # better way to break the loop account_handler = ModuleApi(hs, self) + self.password_providers = [ module(config=config, account_handler=account_handler) for module, config in hs.config.password_providers diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 646f09d2bc..b410e3ad9c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,13 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Iterable, Optional, Tuple from twisted.internet import defer from synapse.http.client import SimpleHttpClient from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.storage.state import StateFilter from synapse.types import UserID if TYPE_CHECKING: @@ -293,6 +294,32 @@ class ModuleApi: registered_user_id, request, client_redirect_url, ) + @defer.inlineCallbacks + def get_state_events_in_room( + self, room_id: str, types: Iterable[Tuple[str, Optional[str]]] + ) -> defer.Deferred: + """Gets current state events for the given room. + + (This is exposed for compatibility with the old SpamCheckerApi. We should + probably deprecate it and replace it with an async method in a subclass.) + + Args: + room_id: The room ID to get state events in. + types: The event type and state key (using None + to represent 'any') of the room state to acquire. + + Returns: + twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: + The filtered state events in the room. 
+ """ + state_ids = yield defer.ensureDeferred( + self._store.get_filtered_current_state_ids( + room_id=room_id, state_filter=StateFilter.from_types(types) + ) + ) + state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) + return state.values() + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/server.py b/synapse/server.py index aa2273955c..f83dd6148c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -91,6 +91,7 @@ from synapse.handlers.typing import FollowerTypingHandler, TypingWriterHandler from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.module_api import ModuleApi from synapse.notifier import Notifier from synapse.push.action_generator import ActionGenerator from synapse.push.pusherpool import PusherPool @@ -656,6 +657,10 @@ class HomeServer(metaclass=abc.ABCMeta): def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter(self.clock, config=self.config.rc_federation) + @cache_in_self + def get_module_api(self) -> ModuleApi: + return ModuleApi(self, self.get_auth_handler()) + async def remove_pusher(self, app_id: str, push_key: str, user_id: str): return await self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index 395ac5ab02..3ce25bb012 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -12,19 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import logging from enum import Enum -from twisted.internet import defer - -from synapse.storage.state import StateFilter - -MYPY = False -if MYPY: - import synapse.server - -logger = logging.getLogger(__name__) - class RegistrationBehaviour(Enum): """ @@ -34,35 +23,3 @@ class RegistrationBehaviour(Enum): ALLOW = "allow" SHADOW_BAN = "shadow_ban" DENY = "deny" - - -class SpamCheckerApi: - """A proxy object that gets passed to spam checkers so they can get - access to rooms and other relevant information. - """ - - def __init__(self, hs: "synapse.server.HomeServer"): - self.hs = hs - - self._store = hs.get_datastore() - - @defer.inlineCallbacks - def get_state_events_in_room(self, room_id: str, types: tuple) -> defer.Deferred: - """Gets state events for the given room. - - Args: - room_id: The room ID to get state events in. - types: The event type and state key (using None - to represent 'any') of the room state to acquire. - - Returns: - twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: - The filtered state events in the room. - """ - state_ids = yield defer.ensureDeferred( - self._store.get_filtered_current_state_ids( - room_id=room_id, state_filter=StateFilter.from_types(types) - ) - ) - state = yield defer.ensureDeferred(self._store.get_events(state_ids.values())) - return state.values() diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 54600ad983..7c790bee7d 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -from synapse.module_api import ModuleApi + from synapse.rest import admin from synapse.rest.client.v1 import login, room @@ -28,7 +28,7 @@ class ModuleApiTestCase(HomeserverTestCase): def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() - self.module_api = ModuleApi(homeserver, homeserver.get_auth_handler()) + self.module_api = homeserver.get_module_api() def test_can_register_user(self): """Tests that an external module can register a user""" From 4cb44a158549e83d42061b02a8b704e7d5873b21 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Wed, 7 Oct 2020 08:00:17 -0400 Subject: [PATCH 121/134] Add support for MSC2697: Dehydrated devices (#8380) This allows a user to store an offline device on the server and then restore it at a subsequent login. --- changelog.d/8380.feature | 1 + synapse/handlers/device.py | 84 ++++++++++- synapse/rest/client/v2_alpha/devices.py | 134 ++++++++++++++++++ synapse/rest/client/v2_alpha/keys.py | 37 +++-- synapse/storage/databases/main/devices.py | 78 +++++++++- .../storage/databases/main/end_to_end_keys.py | 7 +- .../storage/databases/main/registration.py | 32 ++++- .../main/schema/delta/58/11dehydration.sql | 20 +++ tests/handlers/test_device.py | 82 +++++++++++ 9 files changed, 454 insertions(+), 21 deletions(-) create mode 100644 changelog.d/8380.feature create mode 100644 synapse/storage/databases/main/schema/delta/58/11dehydration.sql diff --git a/changelog.d/8380.feature b/changelog.d/8380.feature new file mode 100644 index 0000000000..05ccea19dc --- /dev/null +++ b/changelog.d/8380.feature @@ -0,0 +1 @@ +Add support for device dehydration ([MSC2697](https://github.com/matrix-org/matrix-doc/pull/2697)). diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index b9d9098104..e883ed1e37 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,6 +29,7 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( + JsonDict, StreamToken, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -505,6 +506,85 @@ class DeviceHandler(DeviceWorkerHandler): # receive device updates. Mark this in DB. await self.store.mark_remote_user_device_list_as_unsubscribed(user_id) + async def store_dehydrated_device( + self, + user_id: str, + device_data: JsonDict, + initial_device_display_name: Optional[str] = None, + ) -> str: + """Store a dehydrated device for a user. If the user had a previous + dehydrated device, it is removed. 
+ + Args: + user_id: the user that we are storing the device for + device_data: the dehydrated device information + initial_device_display_name: The display name to use for the device + Returns: + device id of the dehydrated device + """ + device_id = await self.check_device_registered( + user_id, None, initial_device_display_name, + ) + old_device_id = await self.store.store_dehydrated_device( + user_id, device_id, device_data + ) + if old_device_id is not None: + await self.delete_device(user_id, old_device_id) + return device_id + + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + return await self.store.get_dehydrated_device(user_id) + + async def rehydrate_device( + self, user_id: str, access_token: str, device_id: str + ) -> dict: + """Process a rehydration request from the user. + + Args: + user_id: the user who is rehydrating the device + access_token: the access token used for the request + device_id: the ID of the device that will be rehydrated + Returns: + a dict containing {"success": True} + """ + success = await self.store.remove_dehydrated_device(user_id, device_id) + + if not success: + raise errors.NotFoundError() + + # If the dehydrated device was successfully deleted (the device ID + # matched the stored dehydrated device), then modify the access + # token to use the dehydrated device's ID and copy the old device + # display name to the dehydrated device, and destroy the old device + # ID + old_device_id = await self.store.set_device_for_access_token( + access_token, device_id + ) + old_device = await self.store.get_device(user_id, old_device_id) + await self.store.update_device(user_id, device_id, old_device["display_name"]) + # can't call self.delete_device because that will clobber the + # access token so call the storage layer directly + await self.store.delete_device(user_id, old_device_id) + await self.store.delete_e2e_keys_by_device( + user_id=user_id, device_id=old_device_id + ) + + # tell everyone that the old device is gone and that the dehydrated + # device has a new display name + await self.notify_device_update(user_id, [old_device_id, device_id]) + + return {"success": True} + def _update_device_from_client_ips(device, client_ips): ip = client_ips.get((device["user_id"], device["device_id"]), {}) diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 7e174de692..af117cb27c 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,6 +22,7 @@ from synapse.http.servlet import ( assert_params_in_dict, parse_json_object_from_request, ) +from synapse.http.site import SynapseRequest from ._base import client_patterns, interactive_auth_handler @@ -151,7 +153,139 @@ class DeviceRestServlet(RestServlet): return 200, {} +class DehydratedDeviceServlet(RestServlet): + """Retrieve or store a dehydrated device. 
+ + GET /org.matrix.msc2697.v2/dehydrated_device + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id", + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + PUT /org.matrix.msc2697/dehydrated_device + Content-Type: application/json + + { + "device_data": { + "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm", + "account": "dehydrated_device" + } + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + """ + + PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=()) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_GET(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + dehydrated_device = await self.device_handler.get_dehydrated_device( + requester.user.to_string() + ) + if dehydrated_device is not None: + (device_id, device_data) = dehydrated_device + result = {"device_id": device_id, "device_data": device_data} + return (200, result) + else: + raise errors.NotFoundError("No dehydrated device available") + + async def on_PUT(self, request: SynapseRequest): + submission = parse_json_object_from_request(request) + requester = await self.auth.get_user_by_req(request) + + if "device_data" not in submission: + raise errors.SynapseError( + 400, "device_data missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_data"], dict): + raise errors.SynapseError( + 400, + "device_data must be an object", + errcode=errors.Codes.INVALID_PARAM, + ) + + device_id = await self.device_handler.store_dehydrated_device( + requester.user.to_string(), + submission["device_data"], + submission.get("initial_device_display_name", None), + ) + return 200, {"device_id": device_id} + + +class ClaimDehydratedDeviceServlet(RestServlet): + """Claim a dehydrated device. 
+ + POST /org.matrix.msc2697.v2/dehydrated_device/claim + Content-Type: application/json + + { + "device_id": "dehydrated_device_id" + } + + HTTP/1.1 200 OK + Content-Type: application/json + + { + "success": true, + } + + """ + + PATTERNS = client_patterns( + "/org.matrix.msc2697.v2/dehydrated_device/claim", releases=() + ) + + def __init__(self, hs): + super().__init__() + self.hs = hs + self.auth = hs.get_auth() + self.device_handler = hs.get_device_handler() + + async def on_POST(self, request: SynapseRequest): + requester = await self.auth.get_user_by_req(request) + + submission = parse_json_object_from_request(request) + + if "device_id" not in submission: + raise errors.SynapseError( + 400, "device_id missing", errcode=errors.Codes.MISSING_PARAM, + ) + elif not isinstance(submission["device_id"], str): + raise errors.SynapseError( + 400, "device_id must be a string", errcode=errors.Codes.INVALID_PARAM, + ) + + result = await self.device_handler.rehydrate_device( + requester.user.to_string(), + self.auth.get_access_token_from_request(request), + submission["device_id"], + ) + + return (200, result) + + def register_servlets(hs, http_server): DeleteDevicesRestServlet(hs).register(http_server) DevicesRestServlet(hs).register(http_server) DeviceRestServlet(hs).register(http_server) + DehydratedDeviceServlet(hs).register(http_server) + ClaimDehydratedDeviceServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 55c4606569..b91996c738 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,6 +68,7 @@ class KeyUploadServlet(RestServlet): super().__init__() self.auth = hs.get_auth() self.e2e_keys_handler = hs.get_e2e_keys_handler() + self.device_handler = hs.get_device_handler() @trace(opname="upload_keys") async def on_POST(self, request, device_id): @@ -75,23 +77,28 @@ class KeyUploadServlet(RestServlet): body = parse_json_object_from_request(request) if device_id is not None: - # passing the device_id here is deprecated; however, we allow it - # for now for compatibility with older clients. + # Providing the device_id should only be done for setting keys + # for dehydrated devices; however, we allow it for any device for + # compatibility with older clients. 
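+            # (If the uploading device is not the requester's own device, we
+            # check below whether it is the user's dehydrated device before
+            # logging a warning.)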
if requester.device_id is not None and device_id != requester.device_id: - set_tag("error", True) - log_kv( - { - "message": "Client uploading keys for a different device", - "logged_in_id": requester.device_id, - "key_being_uploaded": device_id, - } - ) - logger.warning( - "Client uploading keys for a different device " - "(logged in as %s, uploading for %s)", - requester.device_id, - device_id, + dehydrated_device = await self.device_handler.get_dehydrated_device( + user_id ) + if dehydrated_device is not None and device_id != dehydrated_device[0]: + set_tag("error", True) + log_kv( + { + "message": "Client uploading keys for a different device", + "logged_in_id": requester.device_id, + "key_being_uploaded": device_id, + } + ) + logger.warning( + "Client uploading keys for a different device " + "(logged in as %s, uploading for %s)", + requester.device_id, + device_id, + ) else: device_id = requester.device_id diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fdf394c612..317d6cde95 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,7 +33,7 @@ from synapse.storage.database import ( make_tuple_comparison_clause, ) from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key -from synapse.util import json_encoder +from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import Cache, cached, cachedList from synapse.util.iterutils import batch_iter from synapse.util.stringutils import shortstr @@ -698,6 +698,80 @@ class DeviceWorkerStore(SQLBaseStore): _mark_remote_user_device_list_as_unsubscribed_txn, ) + async def get_dehydrated_device( + self, user_id: str + ) -> Optional[Tuple[str, JsonDict]]: + """Retrieve the information for a dehydrated device. + + Args: + user_id: the user whose dehydrated device we are looking for + Returns: + a tuple whose first item is the device ID, and the second item is + the dehydrated device information + """ + # FIXME: make sure device ID still exists in devices table + row = await self.db_pool.simple_select_one( + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcols=["device_id", "device_data"], + allow_none=True, + ) + return ( + (row["device_id"], json_decoder.decode(row["device_data"])) if row else None + ) + + def _store_dehydrated_device_txn( + self, txn, user_id: str, device_id: str, device_data: str + ) -> Optional[str]: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + retcol="device_id", + allow_none=True, + ) + self.db_pool.simple_upsert_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id}, + values={"device_id": device_id, "device_data": device_data}, + ) + return old_device_id + + async def store_dehydrated_device( + self, user_id: str, device_id: str, device_data: JsonDict + ) -> Optional[str]: + """Store a dehydrated device for a user. 
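+
+        Any previous dehydrated device for this user is overwritten; its ID is
+        returned so that the caller can clean it up.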
+ + Args: + user_id: the user that we are storing the device for + device_id: the ID of the dehydrated device + device_data: the dehydrated device information + Returns: + device id of the user's previous dehydrated device, if any + """ + return await self.db_pool.runInteraction( + "store_dehydrated_device_txn", + self._store_dehydrated_device_txn, + user_id, + device_id, + json_encoder.encode(device_data), + ) + + async def remove_dehydrated_device(self, user_id: str, device_id: str) -> bool: + """Remove a dehydrated device. + + Args: + user_id: the user that the dehydrated device belongs to + device_id: the ID of the dehydrated device + """ + count = await self.db_pool.simple_delete( + "dehydrated_devices", + {"user_id": user_id, "device_id": device_id}, + desc="remove_dehydrated_device", + ) + return count >= 1 + class DeviceBackgroundUpdateStore(SQLBaseStore): def __init__(self, database: DatabasePool, db_conn, hs): diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 8c97f2af5c..359dc6e968 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -844,6 +844,11 @@ class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): self._invalidate_cache_and_stream( txn, self.count_e2e_one_time_keys, (user_id, device_id) ) + self.db_pool.simple_delete_txn( + txn, + table="dehydrated_devices", + keyvalues={"user_id": user_id, "device_id": device_id}, + ) self.db_pool.simple_delete_txn( txn, table="e2e_fallback_keys_json", diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index a83df7759d..16ba545740 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. +# Copyright 2019,2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -964,6 +964,36 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): desc="add_access_token_to_user", ) + def _set_device_for_access_token_txn(self, txn, token: str, device_id: str) -> str: + old_device_id = self.db_pool.simple_select_one_onecol_txn( + txn, "access_tokens", {"token": token}, "device_id" + ) + + self.db_pool.simple_update_txn( + txn, "access_tokens", {"token": token}, {"device_id": device_id} + ) + + self._invalidate_cache_and_stream(txn, self.get_user_by_access_token, (token,)) + + return old_device_id + + async def set_device_for_access_token(self, token: str, device_id: str) -> str: + """Sets the device ID associated with an access token. + + Args: + token: The access token to modify. + device_id: The new device ID. + Returns: + The old device ID associated with the access token. 
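+
+        Used during device rehydration: the login's access token is re-pointed
+        at the dehydrated device's ID, after which the caller cleans up the
+        old device.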
+ """ + + return await self.db_pool.runInteraction( + "set_device_for_access_token", + self._set_device_for_access_token_txn, + token, + device_id, + ) + async def register_user( self, user_id: str, diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql new file mode 100644 index 0000000000..7851a0a825 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/11dehydration.sql @@ -0,0 +1,20 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE IF NOT EXISTS dehydrated_devices( + user_id TEXT NOT NULL PRIMARY KEY, + device_id TEXT NOT NULL, + device_data TEXT NOT NULL -- JSON-encoded client-defined data +); diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index 969d44c787..4512c51311 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -224,3 +225,84 @@ class DeviceTestCase(unittest.HomeserverTestCase): ) ) self.reactor.advance(1000) + + +class DehydrationTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver("server", http_client=None) + self.handler = hs.get_device_handler() + self.registration = hs.get_registration_handler() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + return hs + + def test_dehydrate_and_rehydrate_device(self): + user_id = "@boris:dehydration" + + self.get_success(self.store.register_user(user_id, "foobar")) + + # First check if we can store and fetch a dehydrated device + stored_dehydrated_device_id = self.get_success( + self.handler.store_dehydrated_device( + user_id=user_id, + device_data={"device_data": {"foo": "bar"}}, + initial_device_display_name="dehydrated device", + ) + ) + + retrieved_device_id, device_data = self.get_success( + self.handler.get_dehydrated_device(user_id=user_id) + ) + + self.assertEqual(retrieved_device_id, stored_dehydrated_device_id) + self.assertEqual(device_data, {"device_data": {"foo": "bar"}}) + + # Create a new login for the user and dehydrated the device + device_id, access_token = self.get_success( + self.registration.register_device( + user_id=user_id, device_id=None, initial_display_name="new device", + ) + ) + + # Trying to claim a nonexistent device should throw an error + self.get_failure( + self.handler.rehydrate_device( + user_id=user_id, + access_token=access_token, + device_id="not the right device ID", + ), + synapse.api.errors.NotFoundError, + ) + + # dehydrating the right devices should succeed and change our device ID + # to the dehydrated device's ID + res = self.get_success( + self.handler.rehydrate_device( + user_id=user_id, 
+ access_token=access_token, + device_id=retrieved_device_id, + ) + ) + + self.assertEqual(res, {"success": True}) + + # make sure that our device ID has changed + user_info = self.get_success(self.auth.get_user_by_access_token(access_token)) + + self.assertEqual(user_info["device_id"], retrieved_device_id) + + # make sure the device has the display name that was set from the login + res = self.get_success(self.handler.get_device(user_id, retrieved_device_id)) + + self.assertEqual(res["display_name"], "new device") + + # make sure that the device ID that we were initially assigned no longer exists + self.get_failure( + self.handler.get_device(user_id, device_id), + synapse.api.errors.NotFoundError, + ) + + # make sure that there's no device available for dehydrating now + ret = self.get_success(self.handler.get_dehydrated_device(user_id=user_id)) + + self.assertIsNone(ret) From d373ec2f72f69ae6c394482df89061302be41405 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 7 Oct 2020 13:39:50 +0100 Subject: [PATCH 122/134] unblacklist some tests (#8474) It seems most of these blacklisted tests do actually pass most of the time. I'm of the opinion that having them blacklisted here means there is very little incentive for us to deflake any flaky tests, and meanwhile any value in those tests is completely lost. --- .buildkite/worker-blacklist | 31 ------------------------------- changelog.d/8474.misc | 1 + sytest-blacklist | 3 --- 3 files changed, 1 insertion(+), 34 deletions(-) create mode 100644 changelog.d/8474.misc diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index fd98cbbaf6..5975cb98cf 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -1,41 +1,10 @@ # This file serves as a blacklist for SyTest tests that we expect will fail in # Synapse when run under worker mode. For more details, see sytest-blacklist. -Message history can be paginated - Can re-join room if re-invited -The only membership state included in an initial sync is for all the senders in the timeline - -Local device key changes get to remote servers - -If remote user leaves room we no longer receive device updates - -Forgotten room messages cannot be paginated - -Inbound federation can get public room list - -Members from the gap are included in gappy incr LL sync - -Leaves are present in non-gapped incremental syncs - -Old leaves are present in gapped incremental syncs - -User sees updates to presence from other users in the incremental sync. 
- -Gapped incremental syncs include all state changes - -Old members are included in gappy incr LL sync if they start speaking - # new failures as of https://github.com/matrix-org/sytest/pull/732 Device list doesn't change if remote server is down -Remote servers cannot set power levels in rooms without existing powerlevels -Remote servers should reject attempts by non-creators to set the power levels # https://buildkite.com/matrix-dot-org/synapse/builds/6134#6f67bf47-e234-474d-80e8-c6e1868b15c5 Server correctly handles incoming m.device_list_update - -# this fails reliably with a torture level of 100 due to https://github.com/matrix-org/synapse/issues/6536 -Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state - -Can get rooms/{roomId}/members at a given point diff --git a/changelog.d/8474.misc b/changelog.d/8474.misc new file mode 100644 index 0000000000..65e329a6e3 --- /dev/null +++ b/changelog.d/8474.misc @@ -0,0 +1 @@ +Unblacklist some sytests. diff --git a/sytest-blacklist b/sytest-blacklist index b563448016..de9986357b 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -34,9 +34,6 @@ New federated private chats get full presence information (SYN-115) # this requirement from the spec Inbound federation of state requires event_id as a mandatory paramater -# Blacklisted until https://github.com/matrix-org/synapse/pull/6486 lands -Can upload self-signing keys - # Blacklisted until MSC2753 is implemented Local users can peek into world_readable rooms by room ID We can't peek into rooms with shared history_visibility From d9b55bd830e47fdbae0054afd0035342bb21c76e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 08:48:54 -0400 Subject: [PATCH 123/134] Add Ubuntu 20.10 (Groovy Gorilla) to build scripts. (#8475) --- changelog.d/8475.misc | 1 + scripts-dev/build_debian_packages | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/8475.misc diff --git a/changelog.d/8475.misc b/changelog.d/8475.misc new file mode 100644 index 0000000000..69bcb04097 --- /dev/null +++ b/changelog.d/8475.misc @@ -0,0 +1 @@ +Add Groovy Gorilla to the list of distributions we build `.deb`s for. diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index d055cf3287..d0685c8b35 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -25,6 +25,7 @@ DISTS = ( "ubuntu:xenial", "ubuntu:bionic", "ubuntu:focal", + "ubuntu:groovy", ) DESC = '''\ From 9ca6341969b8b84c0c79a29fb914d1d8dbb3e320 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 13:49:40 +0100 Subject: [PATCH 124/134] Fix returning incorrect prev_batch token in incremental sync (#8486) --- changelog.d/8486.bugfix | 1 + synapse/handlers/sync.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8486.bugfix diff --git a/changelog.d/8486.bugfix b/changelog.d/8486.bugfix new file mode 100644 index 0000000000..63fc091ba6 --- /dev/null +++ b/changelog.d/8486.bugfix @@ -0,0 +1 @@ +Fix incremental sync returning an incorrect `prev_batch` token in timeline section, which when used to paginate returned events that were included in the incremental sync. Broken since v0.16.0. 
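To illustrate the bug with hypothetical stream positions (a sketch only, not
Synapse code): if an incremental sync returns timeline events at positions
101..105 while the "now" token is at 105, the old code set `prev_batch` to the
"now" token, so paginating backwards from it re-fetched events that the sync
response had already delivered. The fix below instead points `prev_batch` just
before the first returned event (via `internal_metadata.before`):

    # Illustrative positions only.
    recents = [101, 102, 103, 104, 105]  # stream orderings of timeline events
    now_token = 105

    # Old behaviour: prev_batch == now_token (105), so a backwards /messages
    # request from prev_batch returns 105, 104, ... all over again.
    # Fixed behaviour: prev_batch sits just before the first returned event.
    prev_batch = (recents[0] - 1) if recents else now_token  # -> 100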
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dd1f90e359..6fb8332f93 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -460,8 +460,13 @@ class SyncHandler: recents = [] if not limited or block_all_timeline: + prev_batch_token = now_token + if recents: + room_key = recents[0].internal_metadata.before + prev_batch_token = now_token.copy_and_replace("room_key", room_key) + return TimelineBatch( - events=recents, prev_batch=now_token, limited=False + events=recents, prev_batch=prev_batch_token, limited=False ) filtering_factor = 2 From b460a088c647a6d3ea0e5a9f4f80d86bb9e303b3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 08:58:21 -0400 Subject: [PATCH 125/134] Add typing information to the device handler. (#8407) --- changelog.d/8407.misc | 1 + mypy.ini | 1 + synapse/handlers/device.py | 89 ++++++++++++++--------- synapse/storage/databases/main/devices.py | 6 +- 4 files changed, 59 insertions(+), 38 deletions(-) create mode 100644 changelog.d/8407.misc diff --git a/changelog.d/8407.misc b/changelog.d/8407.misc new file mode 100644 index 0000000000..d37002d75b --- /dev/null +++ b/changelog.d/8407.misc @@ -0,0 +1 @@ +Add typing information to the device handler. diff --git a/mypy.ini b/mypy.ini index e84ad04e41..a7ffb81ef1 100644 --- a/mypy.ini +++ b/mypy.ini @@ -17,6 +17,7 @@ files = synapse/federation, synapse/handlers/auth.py, synapse/handlers/cas_handler.py, + synapse/handlers/device.py, synapse/handlers/directory.py, synapse/handlers/events.py, synapse/handlers/federation.py, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index e883ed1e37..debb1b4f29 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -29,8 +29,10 @@ from synapse.api.errors import ( from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( + Collection, JsonDict, StreamToken, + UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, ) @@ -42,13 +44,16 @@ from synapse.util.retryutils import NotRetryingDestination from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.app.homeserver import HomeServer + logger = logging.getLogger(__name__) MAX_DEVICE_DISPLAY_NAME_LEN = 100 class DeviceWorkerHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -106,7 +111,9 @@ class DeviceWorkerHandler(BaseHandler): @trace @measure_func("device.get_user_ids_changed") - async def get_user_ids_changed(self, user_id: str, from_token: StreamToken): + async def get_user_ids_changed( + self, user_id: str, from_token: StreamToken + ) -> JsonDict: """Get list of users that have had the devices updated, or have newly joined a room, that `user_id` may be interested in. 
""" @@ -222,8 +229,8 @@ class DeviceWorkerHandler(BaseHandler): possibly_joined = possibly_changed & users_who_share_room possibly_left = (possibly_changed | possibly_left) - users_who_share_room else: - possibly_joined = [] - possibly_left = [] + possibly_joined = set() + possibly_left = set() result = {"changed": list(possibly_joined), "left": list(possibly_left)} @@ -231,7 +238,7 @@ class DeviceWorkerHandler(BaseHandler): return result - async def on_federation_query_user_devices(self, user_id): + async def on_federation_query_user_devices(self, user_id: str) -> JsonDict: stream_id, devices = await self.store.get_e2e_device_keys_for_federation_query( user_id ) @@ -250,7 +257,7 @@ class DeviceWorkerHandler(BaseHandler): class DeviceHandler(DeviceWorkerHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation_sender = hs.get_federation_sender() @@ -265,7 +272,7 @@ class DeviceHandler(DeviceWorkerHandler): hs.get_distributor().observe("user_left_room", self.user_left_room) - def _check_device_name_length(self, name: str): + def _check_device_name_length(self, name: Optional[str]): """ Checks whether a device name is longer than the maximum allowed length. @@ -284,8 +291,11 @@ class DeviceHandler(DeviceWorkerHandler): ) async def check_device_registered( - self, user_id, device_id, initial_device_display_name=None - ): + self, + user_id: str, + device_id: Optional[str], + initial_device_display_name: Optional[str] = None, + ) -> str: """ If the given device has not been registered, register it with the supplied display name. @@ -293,12 +303,11 @@ class DeviceHandler(DeviceWorkerHandler): If no device_id is supplied, we make one up. Args: - user_id (str): @user:id - device_id (str | None): device id supplied by client - initial_device_display_name (str | None): device display name from - client + user_id: @user:id + device_id: device id supplied by client + initial_device_display_name: device display name from client Returns: - str: device id (generated if none was supplied) + device id (generated if none was supplied) """ self._check_device_name_length(initial_device_display_name) @@ -317,15 +326,15 @@ class DeviceHandler(DeviceWorkerHandler): # times in case of a clash. attempts = 0 while attempts < 5: - device_id = stringutils.random_string(10).upper() + new_device_id = stringutils.random_string(10).upper() new_device = await self.store.store_device( user_id=user_id, - device_id=device_id, + device_id=new_device_id, initial_device_display_name=initial_device_display_name, ) if new_device: - await self.notify_device_update(user_id, [device_id]) - return device_id + await self.notify_device_update(user_id, [new_device_id]) + return new_device_id attempts += 1 raise errors.StoreError(500, "Couldn't generate a device ID.") @@ -434,7 +443,9 @@ class DeviceHandler(DeviceWorkerHandler): @trace @measure_func("notify_device_update") - async def notify_device_update(self, user_id, device_ids): + async def notify_device_update( + self, user_id: str, device_ids: Collection[str] + ) -> None: """Notify that a user's device(s) has changed. Pokes the notifier, and remote servers if the user is local. 
""" @@ -446,7 +457,7 @@ class DeviceHandler(DeviceWorkerHandler): user_id ) - hosts = set() + hosts = set() # type: Set[str] if self.hs.is_mine_id(user_id): hosts.update(get_domain_from_id(u) for u in users_who_share_room) hosts.discard(self.server_name) @@ -498,7 +509,7 @@ class DeviceHandler(DeviceWorkerHandler): self.notifier.on_new_event("device_list_key", position, users=[from_user_id]) - async def user_left_room(self, user, room_id): + async def user_left_room(self, user: UserID, room_id: str) -> None: user_id = user.to_string() room_ids = await self.store.get_rooms_for_user(user_id) if not room_ids: @@ -586,7 +597,9 @@ class DeviceHandler(DeviceWorkerHandler): return {"success": True} -def _update_device_from_client_ips(device, client_ips): +def _update_device_from_client_ips( + device: Dict[str, Any], client_ips: Dict[Tuple[str, str], Dict[str, Any]] +) -> None: ip = client_ips.get((device["user_id"], device["device_id"]), {}) device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) @@ -594,7 +607,7 @@ def _update_device_from_client_ips(device, client_ips): class DeviceListUpdater: "Handles incoming device list updates from federation and updates the DB" - def __init__(self, hs, device_handler): + def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): self.store = hs.get_datastore() self.federation = hs.get_federation_client() self.clock = hs.get_clock() @@ -603,7 +616,9 @@ class DeviceListUpdater: self._remote_edu_linearizer = Linearizer(name="remote_device_list") # user_id -> list of updates waiting to be handled. - self._pending_updates = {} + self._pending_updates = ( + {} + ) # type: Dict[str, List[Tuple[str, str, Iterable[str], JsonDict]]] # Recently seen stream ids. We don't bother keeping these in the DB, # but they're useful to have them about to reduce the number of spurious @@ -626,7 +641,9 @@ class DeviceListUpdater: ) @trace - async def incoming_device_list_update(self, origin, edu_content): + async def incoming_device_list_update( + self, origin: str, edu_content: JsonDict + ) -> None: """Called on incoming device list update from federation. Responsible for parsing the EDU and adding to pending updates list. """ @@ -687,7 +704,7 @@ class DeviceListUpdater: await self._handle_device_updates(user_id) @measure_func("_incoming_device_list_update") - async def _handle_device_updates(self, user_id): + async def _handle_device_updates(self, user_id: str) -> None: "Actually handle pending updates." with (await self._remote_edu_linearizer.queue(user_id)): @@ -735,7 +752,9 @@ class DeviceListUpdater: stream_id for _, stream_id, _, _ in pending_updates ) - async def _need_to_do_resync(self, user_id, updates): + async def _need_to_do_resync( + self, user_id: str, updates: Iterable[Tuple[str, str, Iterable[str], JsonDict]] + ) -> bool: """Given a list of updates for a user figure out if we need to do a full resync, or whether we have enough data that we can just apply the delta. """ @@ -766,7 +785,7 @@ class DeviceListUpdater: return False @trace - async def _maybe_retry_device_resync(self): + async def _maybe_retry_device_resync(self) -> None: """Retry to resync device lists that are out of sync, except if another retry is in progress. """ @@ -809,7 +828,7 @@ class DeviceListUpdater: async def user_device_resync( self, user_id: str, mark_failed_as_stale: bool = True - ) -> Optional[dict]: + ) -> Optional[JsonDict]: """Fetches all devices for a user and updates the device cache with them. 
Args: @@ -833,7 +852,7 @@ class DeviceListUpdater: # it later. await self.store.mark_remote_user_device_cache_as_stale(user_id) - return + return None except (RequestSendFailed, HttpResponseException) as e: logger.warning( "Failed to handle device list update for %s: %s", user_id, e, @@ -850,12 +869,12 @@ class DeviceListUpdater: # next time we get a device list update for this user_id. # This makes it more likely that the device lists will # eventually become consistent. - return + return None except FederationDeniedError as e: set_tag("error", True) log_kv({"reason": "FederationDeniedError"}) logger.info(e) - return + return None except Exception as e: set_tag("error", True) log_kv( @@ -868,7 +887,7 @@ class DeviceListUpdater: # it later. await self.store.mark_remote_user_device_cache_as_stale(user_id) - return + return None log_kv({"result": result}) stream_id = result["stream_id"] devices = result["devices"] @@ -929,7 +948,7 @@ class DeviceListUpdater: user_id: str, master_key: Optional[Dict[str, Any]], self_signing_key: Optional[Dict[str, Any]], - ) -> list: + ) -> List[str]: """Process the given new master and self-signing key for the given remote user. Args: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 317d6cde95..2d0a6408b5 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -911,7 +911,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): self._clock.looping_call(self._prune_old_outbound_device_pokes, 60 * 60 * 1000) async def store_device( - self, user_id: str, device_id: str, initial_device_display_name: str + self, user_id: str, device_id: str, initial_device_display_name: Optional[str] ) -> bool: """Ensure the given device is known; add it to the store if not @@ -1029,7 +1029,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): ) async def update_remote_device_list_cache_entry( - self, user_id: str, device_id: str, content: JsonDict, stream_id: int + self, user_id: str, device_id: str, content: JsonDict, stream_id: str ) -> None: """Updates a single device in the cache of a remote user's devicelist. @@ -1057,7 +1057,7 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore): user_id: str, device_id: str, content: JsonDict, - stream_id: int, + stream_id: str, ) -> None: if content.get("deleted"): self.db_pool.simple_delete_txn( From 52a50e8686ec9af6c629004171748f41eae09f73 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:33 +0100 Subject: [PATCH 126/134] Use vector clocks for room stream tokens. (#8439) Currently when using multiple event persisters we (in the worst case) don't tell clients about events until all event persisters have persisted new events after the original event. This is a suboptimal, especially if one of the event persisters goes down. To handle this, we encode the position of each event persister in the room tokens so that we can send events to clients immediately. To reduce the size of the token we do two things: 1. We create a unique immutable persistent mapping between instance names and a generated small integer ID, which we can encode in the tokens instead of the instance name; and 2. We encode the "persisted upto position" of the room token and then only explicitly include instances that have positions strictly greater than that. 
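As a rough sketch of the resulting encoding (illustrative only --
`encode_room_token` is a hypothetical helper; the real serialization and
parsing live in `synapse.types.RoomStreamToken`, in the diff below):

    from typing import Dict

    def encode_room_token(min_pos: int, positions: Dict[int, int]) -> str:
        # `positions` maps the small integer instance IDs from (1) to stream
        # positions; per (2), only writers strictly ahead of `min_pos` are
        # written out explicitly.
        entries = "~".join(
            "%d.%d" % (instance_id, pos)
            for instance_id, pos in sorted(positions.items())
            if pos > min_pos
        )
        return "m%d~%s" % (min_pos, entries) if entries else "m%d" % min_pos

    assert encode_room_token(56, {2: 58, 3: 59}) == "m56~2.58~3.59"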
The new tokens look something like: `m3478~1.3488~2.3489`, where the first number is the min position, and the subsequent `~`-separated pairs are the instance ID to positions map. (We use `.` and `~` as separators as they're URL safe and not already used by `StreamToken`). --- changelog.d/8439.misc | 1 + .../delta/58/19instance_map.sql.postgres | 25 ++ synapse/storage/databases/main/stream.py | 276 +++++++++++++++--- synapse/types.py | 116 +++++++- 4 files changed, 378 insertions(+), 40 deletions(-) create mode 100644 changelog.d/8439.misc create mode 100644 synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres diff --git a/changelog.d/8439.misc b/changelog.d/8439.misc new file mode 100644 index 0000000000..237cb3b311 --- /dev/null +++ b/changelog.d/8439.misc @@ -0,0 +1 @@ +Allow events to be sent to clients sooner when using sharded event persisters. diff --git a/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres b/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres new file mode 100644 index 0000000000..841186b826 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres @@ -0,0 +1,25 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +-- A unique and immutable mapping between instance name and an integer ID. This +-- lets us refer to instances via a small ID in e.g. stream tokens, without +-- having to encode the full name. +CREATE TABLE IF NOT EXISTS instance_map ( + instance_id SERIAL PRIMARY KEY, + instance_name TEXT NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS instance_map_idx ON instance_map(instance_name); diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index a94bec1ac5..e3b9ff5ca6 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -53,7 +53,9 @@ from synapse.storage.database import ( ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import Collection, PersistedEventPosition, RoomStreamToken +from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache if TYPE_CHECKING: @@ -208,6 +210,55 @@ def _make_generic_sql_bound( ) +def _filter_results( + lower_token: Optional[RoomStreamToken], + upper_token: Optional[RoomStreamToken], + instance_name: str, + topological_ordering: int, + stream_ordering: int, +) -> bool: + """Returns True if the event persisted by the given instance at the given + topological/stream_ordering falls between the two tokens (taking a None + token to mean unbounded). + + Used to filter results from fetching events in the DB against the given + tokens.
This is necessary to handle the case where the tokens include + position maps, which we handle by fetching more than necessary from the DB + and then filtering (rather than attempting to construct a complicated SQL + query). + """ + + event_historical_tuple = ( + topological_ordering, + stream_ordering, + ) + + if lower_token: + if lower_token.topological is not None: + # If these are historical tokens we compare the `(topological, stream)` + # tuples. + if event_historical_tuple <= lower_token.as_historical_tuple(): + return False + + else: + # If these are live tokens we compare the stream ordering against the + # writers stream position. + if stream_ordering <= lower_token.get_stream_pos_for_instance( + instance_name + ): + return False + + if upper_token: + if upper_token.topological is not None: + if upper_token.as_historical_tuple() < event_historical_tuple: + return False + else: + if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering: + return False + + return True + + def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]: # NB: This may create SQL clauses that don't optimise well (and we don't # have indices on all possible clauses). E.g. it may create @@ -305,7 +356,31 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): raise NotImplementedError() def get_room_max_token(self) -> RoomStreamToken: - return RoomStreamToken(None, self.get_room_max_stream_ordering()) + """Get a `RoomStreamToken` that marks the current maximum persisted + position of the events stream. Useful to get a token that represents + "now". + + The token returned is a "live" token that may have an instance_map + component. + """ + + min_pos = self._stream_id_gen.get_current_token() + + positions = {} + if isinstance(self._stream_id_gen, MultiWriterIdGenerator): + # The `min_pos` is the minimum position that we know all instances + # have finished persisting to, so we only care about instances whose + # positions are ahead of that. (Instance positions can be behind the + # min position as there are times we can work out that the minimum + # position is ahead of the naive minimum across all current + # positions. See MultiWriterIdGenerator for details) + positions = { + i: p + for i, p in self._stream_id_gen.get_positions().items() + if p > min_pos + } + + return RoomStreamToken(None, min_pos, positions) async def get_room_events_stream_for_rooms( self, @@ -404,25 +479,43 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): if from_key == to_key: return [], from_key - from_id = from_key.stream - to_id = to_key.stream - - has_changed = self._events_stream_cache.has_entity_changed(room_id, from_id) + has_changed = self._events_stream_cache.has_entity_changed( + room_id, from_key.stream + ) if not has_changed: return [], from_key def f(txn): - sql = ( - "SELECT event_id, stream_ordering FROM events WHERE" - " room_id = ?" - " AND not outlier" - " AND stream_ordering > ? AND stream_ordering <= ?" - " ORDER BY stream_ordering %s LIMIT ?" - ) % (order,) - txn.execute(sql, (room_id, from_id, to_id, limit)) + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_from_id = from_key.stream + max_to_id = to_key.get_max_stream_pos() - rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] + sql = """ + SELECT event_id, instance_name, topological_ordering, stream_ordering + FROM events + WHERE + room_id = ? 
+ AND not outlier + AND stream_ordering > ? AND stream_ordering <= ? + ORDER BY stream_ordering %s LIMIT ? + """ % ( + order, + ) + txn.execute(sql, (room_id, min_from_id, max_to_id, 2 * limit)) + + rows = [ + _EventDictReturn(event_id, None, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + from_key, + to_key, + instance_name, + topological_ordering, + stream_ordering, + ) + ][:limit] return rows rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f) @@ -431,7 +524,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): [r.event_id for r in rows], get_prev_content=True ) - self._set_before_and_after(ret, rows, topo_order=from_id is None) + self._set_before_and_after(ret, rows, topo_order=False) if order.lower() == "desc": ret.reverse() @@ -448,31 +541,43 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): async def get_membership_changes_for_user( self, user_id: str, from_key: RoomStreamToken, to_key: RoomStreamToken ) -> List[EventBase]: - from_id = from_key.stream - to_id = to_key.stream - if from_key == to_key: return [] - if from_id: + if from_key: has_changed = self._membership_stream_cache.has_entity_changed( - user_id, int(from_id) + user_id, int(from_key.stream) ) if not has_changed: return [] def f(txn): - sql = ( - "SELECT m.event_id, stream_ordering FROM events AS e," - " room_memberships AS m" - " WHERE e.event_id = m.event_id" - " AND m.user_id = ?" - " AND e.stream_ordering > ? AND e.stream_ordering <= ?" - " ORDER BY e.stream_ordering ASC" - ) - txn.execute(sql, (user_id, from_id, to_id)) + # To handle tokens with a non-empty instance_map we fetch more + # results than necessary and then filter down + min_from_id = from_key.stream + max_to_id = to_key.get_max_stream_pos() - rows = [_EventDictReturn(row[0], None, row[1]) for row in txn] + sql = """ + SELECT m.event_id, instance_name, topological_ordering, stream_ordering + FROM events AS e, room_memberships AS m + WHERE e.event_id = m.event_id + AND m.user_id = ? + AND e.stream_ordering > ? AND e.stream_ordering <= ? + ORDER BY e.stream_ordering ASC + """ + txn.execute(sql, (user_id, min_from_id, max_to_id,)) + + rows = [ + _EventDictReturn(event_id, None, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + from_key, + to_key, + instance_name, + topological_ordering, + stream_ordering, + ) + ] return rows @@ -966,11 +1071,46 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): else: order = "ASC" + # The bounds for the stream tokens are complicated by the fact + # that we need to handle the instance_map part of the tokens. We do this + # by fetching all events between the min stream token and the maximum + # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and + # then filtering the results. 
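+        # (When paginating backwards, `from_token` is the upper bound of the
+        # range and `to_token` the lower bound, so it is `from_token` that we
+        # widen with `get_max_stream_pos()`; when paginating forwards the
+        # roles, and hence the widened bound, are swapped.)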
+ if from_token.topological is not None: + from_bound = ( + from_token.as_historical_tuple() + ) # type: Tuple[Optional[int], int] + elif direction == "b": + from_bound = ( + None, + from_token.get_max_stream_pos(), + ) + else: + from_bound = ( + None, + from_token.stream, + ) + + to_bound = None # type: Optional[Tuple[Optional[int], int]] + if to_token: + if to_token.topological is not None: + to_bound = to_token.as_historical_tuple() + elif direction == "b": + to_bound = ( + None, + to_token.stream, + ) + else: + to_bound = ( + None, + to_token.get_max_stream_pos(), + ) + bounds = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=from_token.as_tuple(), - to_token=to_token.as_tuple() if to_token else None, + from_token=from_bound, + to_token=to_bound, engine=self.database_engine, ) @@ -980,7 +1120,8 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): bounds += " AND " + filter_clause args.extend(filter_args) - args.append(int(limit)) + # We fetch more events as we'll filter the result set + args.append(int(limit) * 2) select_keywords = "SELECT" join_clause = "" @@ -1002,7 +1143,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): select_keywords += "DISTINCT" sql = """ - %(select_keywords)s event_id, topological_ordering, stream_ordering + %(select_keywords)s + event_id, instance_name, + topological_ordering, stream_ordering FROM events %(join_clause)s WHERE outlier = ? AND room_id = ? AND %(bounds)s @@ -1017,7 +1160,18 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): txn.execute(sql, args) - rows = [_EventDictReturn(row[0], row[1], row[2]) for row in txn] + # Filter the result set. + rows = [ + _EventDictReturn(event_id, topological_ordering, stream_ordering) + for event_id, instance_name, topological_ordering, stream_ordering in txn + if _filter_results( + lower_token=to_token if direction == "b" else from_token, + upper_token=from_token if direction == "b" else to_token, + instance_name=instance_name, + topological_ordering=topological_ordering, + stream_ordering=stream_ordering, + ) + ][:limit] if rows: topo = rows[-1].topological_ordering @@ -1082,6 +1236,58 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore, metaclass=abc.ABCMeta): return (events, token) + @cached() + async def get_id_for_instance(self, instance_name: str) -> int: + """Get a unique, immutable ID that corresponds to the given Synapse worker instance. + """ + + def _get_id_for_instance_txn(txn): + instance_id = self.db_pool.simple_select_one_onecol_txn( + txn, + table="instance_map", + keyvalues={"instance_name": instance_name}, + retcol="instance_id", + allow_none=True, + ) + if instance_id is not None: + return instance_id + + # If we don't have an entry upsert one. + # + # We could do this before the first check, and rely on the cache for + # efficiency, but each UPSERT causes the next ID to increment which + # can quickly bloat the size of the generated IDs for new instances. 
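+            # Upsert a blank row for this instance name, then read back the
+            # integer ID that the database assigned to it.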
+                self.db_pool.simple_upsert_txn(
+                    txn,
+                    table="instance_map",
+                    keyvalues={"instance_name": instance_name},
+                    values={},
+                )
+
+                return self.db_pool.simple_select_one_onecol_txn(
+                    txn,
+                    table="instance_map",
+                    keyvalues={"instance_name": instance_name},
+                    retcol="instance_id",
+                )
+
+        return await self.db_pool.runInteraction(
+            "get_id_for_instance", _get_id_for_instance_txn
+        )
+
+    @cached()
+    async def get_name_from_instance_id(self, instance_id: int) -> str:
+        """Get the instance name from an ID previously returned by
+        `get_id_for_instance`.
+        """
+
+        return await self.db_pool.simple_select_one_onecol(
+            table="instance_map",
+            keyvalues={"instance_id": instance_id},
+            retcol="instance_name",
+            desc="get_name_from_instance_id",
+        )
+
 
 class StreamStore(StreamWorkerStore):
     def get_room_max_stream_ordering(self) -> int:
diff --git a/synapse/types.py b/synapse/types.py
index bd271f9f16..5bde67cc07 100644
--- a/synapse/types.py
+++ b/synapse/types.py
@@ -22,6 +22,7 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Dict,
+    Iterable,
     Mapping,
     MutableMapping,
     Optional,
@@ -43,7 +44,7 @@ if TYPE_CHECKING:
 if sys.version_info[:3] >= (3, 6, 0):
     from typing import Collection
 else:
-    from typing import Container, Iterable, Sized
+    from typing import Container, Sized
 
 T_co = TypeVar("T_co", covariant=True)
 
@@ -375,7 +376,7 @@ def map_username_to_mxid_localpart(username, case_sensitive=False):
     return username.decode("ascii")
 
 
-@attr.s(frozen=True, slots=True)
+@attr.s(frozen=True, slots=True, cmp=False)
class RoomStreamToken:
     """Tokens are positions between events. The token "s1" comes after event 1.
 
@@ -397,6 +398,31 @@ class RoomStreamToken:
     event it comes after. Historic tokens start with a "t" followed by the
     "topological_ordering" id of the event it comes after, followed by "-",
     followed by the "stream_ordering" id of the event it comes after.
+
+    There is also a third mode for live tokens where the token starts with "m",
+    which is sometimes used when using sharded event persisters. In this case
+    the events stream is considered to be a set of streams (one for each writer)
+    and the token encodes the vector clock of positions of each writer in their
+    respective streams.
+
+    The format of the token in such a case is an initial integer min position,
+    followed by the mapping of instance ID to position separated by '.' and '~':
+
+        m{min_pos}~{writer1}.{pos1}~{writer2}.{pos2}. ...
+
+    The `min_pos` corresponds to the minimum position all writers have persisted
+    up to, and then only writers that are ahead of that position need to be
+    encoded. An example token is:
+
+        m56~2.58~3.59
+
+    Which corresponds to a set of three (or more) writers, where instances 2 and
+    3 (these are instance IDs that can be looked up in the DB to fetch the more
+    commonly used instance names) are at positions 58 and 59 respectively, and
+    all other instances are at position 56.
+
+    Note: The `RoomStreamToken` cannot have both a topological part and an
+    instance map.
     """
 
     topological = attr.ib(
@@ -405,6 +431,25 @@ class RoomStreamToken:
     )
     stream = attr.ib(type=int, validator=attr.validators.instance_of(int))
 
+    instance_map = attr.ib(
+        type=Dict[str, int],
+        factory=dict,
+        validator=attr.validators.deep_mapping(
+            key_validator=attr.validators.instance_of(str),
+            value_validator=attr.validators.instance_of(int),
+            mapping_validator=attr.validators.instance_of(dict),
+        ),
+    )
+
+    def __attrs_post_init__(self):
+        """Validates that both `topological` and `instance_map` aren't set.
+ """ + + if self.instance_map and self.topological: + raise ValueError( + "Cannot set both 'topological' and 'instance_map' on 'RoomStreamToken'." + ) + @classmethod async def parse(cls, store: "DataStore", string: str) -> "RoomStreamToken": try: @@ -413,6 +458,20 @@ class RoomStreamToken: if string[0] == "t": parts = string[1:].split("-", 1) return cls(topological=int(parts[0]), stream=int(parts[1])) + if string[0] == "m": + parts = string[1:].split("~") + stream = int(parts[0]) + + instance_map = {} + for part in parts[1:]: + key, value = part.split(".") + instance_id = int(key) + pos = int(value) + + instance_name = await store.get_name_from_instance_id(instance_id) + instance_map[instance_name] = pos + + return cls(topological=None, stream=stream, instance_map=instance_map,) except Exception: pass raise SynapseError(400, "Invalid token %r" % (string,)) @@ -436,14 +495,61 @@ class RoomStreamToken: max_stream = max(self.stream, other.stream) - return RoomStreamToken(None, max_stream) + instance_map = { + instance: max( + self.instance_map.get(instance, self.stream), + other.instance_map.get(instance, other.stream), + ) + for instance in set(self.instance_map).union(other.instance_map) + } + + return RoomStreamToken(None, max_stream, instance_map) + + def as_historical_tuple(self) -> Tuple[int, int]: + """Returns a tuple of `(topological, stream)` for historical tokens. + + Raises if not an historical token (i.e. doesn't have a topological part). + """ + if self.topological is None: + raise Exception( + "Cannot call `RoomStreamToken.as_historical_tuple` on live token" + ) - def as_tuple(self) -> Tuple[Optional[int], int]: return (self.topological, self.stream) + def get_stream_pos_for_instance(self, instance_name: str) -> int: + """Get the stream position that the given writer was at at this token. + + This only makes sense for "live" tokens that may have a vector clock + component, and so asserts that this is a "live" token. + """ + assert self.topological is None + + # If we don't have an entry for the instance we can assume that it was + # at `self.stream`. + return self.instance_map.get(instance_name, self.stream) + + def get_max_stream_pos(self) -> int: + """Get the maximum stream position referenced in this token. + + The corresponding "min" position is, by definition just `self.stream`. + + This is used to handle tokens that have non-empty `instance_map`, and so + reference stream positions after the `self.stream` position. 
+ """ + return max(self.instance_map.values(), default=self.stream) + async def to_string(self, store: "DataStore") -> str: if self.topological is not None: return "t%d-%d" % (self.topological, self.stream) + elif self.instance_map: + entries = [] + for name, pos in self.instance_map.items(): + instance_id = await store.get_id_for_instance(name) + entries.append("{}.{}".format(instance_id, pos)) + + encoded_map = "~".join(entries) + return "m{}~{}".format(self.stream, encoded_map) else: return "s%d" % (self.stream,) @@ -535,7 +641,7 @@ class PersistedEventPosition: stream = attr.ib(type=int) def persisted_after(self, token: RoomStreamToken) -> bool: - return token.stream < self.stream + return token.get_stream_pos_for_instance(self.instance_name) < self.stream def to_room_stream_token(self) -> RoomStreamToken: """Converts the position to a room stream token such that events From ae5b2a72c09d67311c9830f5a6fae1decce03e1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:57 +0100 Subject: [PATCH 127/134] Reduce serialization errors in MultiWriterIdGen (#8456) We call `_update_stream_positions_table_txn` a lot, which is an UPSERT that can conflict in `REPEATABLE READ` isolation level. Instead of doing a transaction consisting of a single query we may as well run it outside of a transaction. --- changelog.d/8456.misc | 1 + synapse/storage/database.py | 69 ++++++++++++++++++++++++--- synapse/storage/engines/_base.py | 17 +++++++ synapse/storage/engines/postgres.py | 10 +++- synapse/storage/engines/sqlite.py | 10 ++++ synapse/storage/util/id_generators.py | 12 ++++- tests/storage/test_base.py | 1 + 7 files changed, 112 insertions(+), 8 deletions(-) create mode 100644 changelog.d/8456.misc diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc new file mode 100644 index 0000000000..ccd260069b --- /dev/null +++ b/changelog.d/8456.misc @@ -0,0 +1 @@ +Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 0d9d9b7cc0..0ba3a025cf 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -463,6 +463,24 @@ class DatabasePool: *args: Any, **kwargs: Any ) -> R: + """Start a new database transaction with the given connection. + + Note: The given func may be called multiple times under certain + failure modes. This is normally fine when in a standard transaction, + but care must be taken if the connection is in `autocommit` mode that + the function will correctly handle being aborted and retried half way + through its execution. + + Args: + conn + desc + after_callbacks + exception_callbacks + func + *args + **kwargs + """ + start = monotonic_time() txn_id = self._TXN_ID @@ -566,7 +584,12 @@ class DatabasePool: sql_txn_timer.labels(desc).observe(duration) async def runInteraction( - self, desc: str, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + desc: str, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Starts a transaction on the database and runs a given function @@ -576,6 +599,18 @@ class DatabasePool: database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transactions + that are only a single query. + + Currently, this is only implemented for Postgres. SQLite will still + run the function inside a transaction. 
+
+                WARNING: This means that if func fails half way through then
+                the changes will *not* be rolled back. `func` may also get
+                called multiple times if the transaction is retried, so must
+                correctly handle that case.
+
             args: positional args to pass to `func`
             kwargs: named args to pass to `func`
 
@@ -596,6 +631,7 @@
                 exception_callbacks,
                 func,
                 *args,
+                db_autocommit=db_autocommit,
                 **kwargs
             )
 
@@ -609,7 +645,11 @@
         return cast(R, result)
 
     async def runWithConnection(
-        self, func: "Callable[..., R]", *args: Any, **kwargs: Any
+        self,
+        func: "Callable[..., R]",
+        *args: Any,
+        db_autocommit: bool = False,
+        **kwargs: Any
     ) -> R:
         """Wraps the .runWithConnection() method on the underlying db_pool.
 
@@ -618,6 +658,9 @@
             database connection (twisted.enterprise.adbapi.Connection) as
             its first argument, followed by `args` and `kwargs`.
         args: positional args to pass to `func`
+        db_autocommit: Whether to run the function in "autocommit" mode,
+            i.e. outside of a transaction. This is useful for transactions
+            that are only a single query. Currently only affects postgres.
         kwargs: named args to pass to `func`
 
         Returns:
@@ -633,6 +676,13 @@
         start_time = monotonic_time()
 
         def inner_func(conn, *args, **kwargs):
+            # We shouldn't be in a transaction. If we are then something
+            # somewhere hasn't committed after doing work. (This is likely only
+            # possible during startup, as `run*` will ensure changes are
+            # committed/rolled back before putting the connection back in the
+            # pool).
+            assert not self.engine.in_transaction(conn)
+
             with LoggingContext("runWithConnection", parent_context) as context:
                 sched_duration_sec = monotonic_time() - start_time
                 sql_scheduling_timer.observe(sched_duration_sec)
@@ -642,10 +692,17 @@
                     logger.debug("Reconnecting closed database connection")
                     conn.reconnect()
 
-                db_conn = LoggingDatabaseConnection(
-                    conn, self.engine, "runWithConnection"
-                )
-                return func(db_conn, *args, **kwargs)
+                try:
+                    if db_autocommit:
+                        self.engine.attempt_to_set_autocommit(conn, True)
+
+                    db_conn = LoggingDatabaseConnection(
+                        conn, self.engine, "runWithConnection"
+                    )
+                    return func(db_conn, *args, **kwargs)
+                finally:
+                    if db_autocommit:
+                        self.engine.attempt_to_set_autocommit(conn, False)
 
         return await make_deferred_yieldable(
             self._db_pool.runWithConnection(inner_func, *args, **kwargs)
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 908cbc79e3..d6d632dc10 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -97,3 +97,20 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
         """Gets a string giving the server version. For example: '3.22.0'
         """
         ...
+
+    @abc.abstractmethod
+    def in_transaction(self, conn: Connection) -> bool:
+        """Whether the connection is currently in a transaction.
+        """
+        ...
+
+    @abc.abstractmethod
+    def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
+        """Attempt to set the connection's autocommit mode.
+
+        When True, queries are run outside of transactions.
+
+        Note: This has no effect on SQLite3, so callers still need to
+        commit/rollback the connections.
+        """
+        ...
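To make the new contract concrete, the following is a rough sketch of a caller opting into `db_autocommit`. Only `runInteraction` and the flag itself come from this patch; the store method, table, and SQL below are hypothetical illustrations. The key constraint is that the function must be a single idempotent statement, since on failure it may be retried without anything having been rolled back.

    # Hypothetical caller of the new flag; the method and table are
    # illustrative only, not part of this patch.
    async def bump_stream_position(self, instance_name: str, new_pos: int) -> None:
        def _bump_txn(txn):
            # A single idempotent statement: if it is aborted or retried,
            # there is nothing that needs rolling back.
            txn.execute(
                "UPDATE stream_positions SET stream_id = ? WHERE instance_name = ?",
                (new_pos, instance_name),
            )

        # Runs outside a transaction on Postgres; SQLite still wraps it in
        # one, since attempt_to_set_autocommit is a no-op there.
        await self.db_pool.runInteraction(
            "bump_stream_position", _bump_txn, db_autocommit=True
        )

Because the flag has no effect on SQLite, code that uses it must remain correct when run inside a transaction as well.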
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index ff39281f85..7719ac32f7 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,7 +15,8 @@ import logging -from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.types import Connection logger = logging.getLogger(__name__) @@ -119,6 +120,7 @@ class PostgresEngine(BaseDatabaseEngine): cursor.execute("SET synchronous_commit TO OFF") cursor.close() + db_conn.commit() @property def can_native_upsert(self): @@ -171,3 +173,9 @@ class PostgresEngine(BaseDatabaseEngine): return "%i.%i" % (numver / 10000, numver % 10000) else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + + def in_transaction(self, conn: Connection) -> bool: + return conn.status != self.module.extensions.STATUS_READY # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + return conn.set_session(autocommit=autocommit) # type: ignore diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 8a0f8c89d1..5db0f0b520 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -17,6 +17,7 @@ import threading import typing from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.types import Connection if typing.TYPE_CHECKING: import sqlite3 # noqa: F401 @@ -86,6 +87,7 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + db_conn.commit() def is_deadlock(self, error): return False @@ -105,6 +107,14 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): """ return "%i.%i.%i" % self.module.sqlite_version_info + def in_transaction(self, conn: Connection) -> bool: + return conn.in_transaction # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + # Twisted doesn't let us set attributes on the connections, so we can't + # set the connection to autocommit mode. + pass + # Following functions taken from: https://github.com/coleifer/peewee diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 51f680d05d..d7e40aaa8b 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -24,6 +24,7 @@ from typing_extensions import Deque from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) @@ -548,7 +549,7 @@ class MultiWriterIdGenerator: # do. break - def _update_stream_positions_table_txn(self, txn): + def _update_stream_positions_table_txn(self, txn: Cursor): """Update the `stream_positions` table with newly persisted position. """ @@ -598,10 +599,13 @@ class _MultiWriterCtxManager: stream_ids = attr.ib(type=List[int], factory=list) async def __aenter__(self) -> Union[int, List[int]]: + # It's safe to run this in autocommit mode as fetching values from a + # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( "_load_next_mult_id", self.id_gen._load_next_mult_id_txn, self.multiple_ids or 1, + db_autocommit=True, ) # Assert the fetched ID is actually greater than any ID we've already @@ -632,10 +636,16 @@ class _MultiWriterCtxManager: # # We only do this on the success path so that the persisted current # position points to a persisted row with the correct instance name. + # + # We do this in autocommit mode as a) the upsert works correctly outside + # transactions and b) reduces the amount of time the rows are locked + # for. If we don't do this then we'll often hit serialization errors due + # to the fact we default to REPEATABLE READ isolation levels. if self.id_gen._writers: await self.id_gen._db.runInteraction( "MultiWriterIdGenerator._update_table", self.id_gen._update_stream_positions_table_txn, + db_autocommit=True, ) return False diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 40ba652248..eac7e4dcd2 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,6 +56,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False + fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db._db_pool = self.db_pool From 8dbf62fada36f11a915cea4b6445f716e931dea3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 11:13:38 -0400 Subject: [PATCH 128/134] Include the configured log level in phone home stats. (#8477) By reporting the log level of the synapse logger as a string. --- changelog.d/8477.misc | 1 + synapse/app/phone_stats_home.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/8477.misc diff --git a/changelog.d/8477.misc b/changelog.d/8477.misc new file mode 100644 index 0000000000..2ee1606b6e --- /dev/null +++ b/changelog.d/8477.misc @@ -0,0 +1 @@ +Include the log level in the phone home stats. 
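The diff below relies on two standard library calls: `Logger.getEffectiveLevel()`, which walks up the logger hierarchy until it finds a configured level, and `logging.getLevelName()`, which maps the numeric level back to its name. A standalone sketch of the same behaviour (the INFO level here is an arbitrary choice for the example, not what Synapse configures):

    import logging

    # Arbitrary configuration for the sake of the example.
    logging.getLogger("synapse").setLevel(logging.INFO)

    # No level is set on the child logger, so the effective level is
    # inherited from the "synapse" logger configured above.
    child = logging.getLogger("synapse.storage")
    level = child.getEffectiveLevel()

    print(level)                        # 20
    print(logging.getLevelName(level))  # INFO

Note that `getLevelName` falls back to the string "Level N" for numeric levels that have no registered name.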
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 2c8e14a8c0..daed8ccfe9 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -113,6 +113,13 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process): stats["database_engine"] = hs.get_datastore().db_pool.engine.module.__name__ stats["database_server_version"] = hs.get_datastore().db_pool.engine.server_version + # + # Logging configuration + # + synapse_logger = logging.getLogger("synapse") + log_level = synapse_logger.getEffectiveLevel() + stats["log_level"] = logging.getLevelName(log_level) + logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)) try: await hs.get_proxied_http_client().put_json( From e4f72ddc44367d0cd53e6cfc5ba310b6f55319b6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 7 Oct 2020 11:27:56 -0400 Subject: [PATCH 129/134] Move additional tasks to the background worker (#8458) --- changelog.d/8458.feature | 1 + synapse/app/generic_worker.py | 4 + synapse/app/phone_stats_home.py | 33 ++-- synapse/storage/databases/main/client_ips.py | 109 ++++++----- synapse/storage/databases/main/metrics.py | 14 +- .../storage/databases/main/registration.py | 184 +++++++++--------- synapse/storage/databases/main/roommember.py | 5 +- .../storage/databases/main/transactions.py | 42 ++-- 8 files changed, 195 insertions(+), 197 deletions(-) create mode 100644 changelog.d/8458.feature diff --git a/changelog.d/8458.feature b/changelog.d/8458.feature new file mode 100644 index 0000000000..542993110b --- /dev/null +++ b/changelog.d/8458.feature @@ -0,0 +1 @@ +Allow running background tasks in a separate worker process. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index fc5188ce95..d53181deb1 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -127,6 +127,7 @@ from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.server import HomeServer, cache_in_self from synapse.storage.databases.main.censor_events import CensorEventsStore +from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.metrics import ServerMetricsStore from synapse.storage.databases.main.monthly_active_users import ( @@ -135,6 +136,7 @@ from synapse.storage.databases.main.monthly_active_users import ( from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats import StatsStore +from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore from synapse.types import ReadReceipt @@ -466,6 +468,7 @@ class GenericWorkerSlavedStore( SlavedAccountDataStore, SlavedPusherStore, CensorEventsStore, + ClientIpWorkerStore, SlavedEventStore, SlavedKeyStore, RoomStore, @@ -481,6 +484,7 @@ class GenericWorkerSlavedStore( MediaRepositoryStore, ServerMetricsStore, SearchWorkerStore, + TransactionWorkerStore, BaseSlavedStore, ): pass diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index daed8ccfe9..8a69104a04 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import logging import math import resource @@ -19,7 +18,10 @@ import sys from prometheus_client import Gauge -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) logger = logging.getLogger("synapse.app.homeserver") @@ -41,6 +43,7 @@ registered_reserved_users_mau_gauge = Gauge( ) +@wrap_as_background_process("phone_stats_home") async def phone_stats_home(hs, stats, stats_process=_stats_process): logger.info("Gathering stats for reporting") now = int(hs.get_clock().time()) @@ -143,20 +146,10 @@ def start_phone_stats_home(hs): (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) ) - def start_phone_stats_home(): - return run_as_background_process( - "phone_stats_home", phone_stats_home, hs, stats - ) - - def generate_user_daily_visit_stats(): - return run_as_background_process( - "generate_user_daily_visits", hs.get_datastore().generate_user_daily_visits - ) - # Rather than update on per session basis, batch up the requests. # If you increase the loop period, the accuracy of user_daily_visits # table will decrease - clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000) + clock.looping_call(hs.get_datastore().generate_user_daily_visits, 5 * 60 * 1000) # monthly active user limiting functionality def reap_monthly_active_users(): @@ -167,6 +160,7 @@ def start_phone_stats_home(hs): clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60) reap_monthly_active_users() + @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users(): current_mau_count = 0 current_mau_count_by_service = {} @@ -186,19 +180,14 @@ def start_phone_stats_home(hs): registered_reserved_users_mau_gauge.set(float(len(reserved_users))) max_mau_gauge.set(float(hs.config.max_mau_value)) - def start_generate_monthly_active_users(): - return run_as_background_process( - "generate_monthly_active_users", generate_monthly_active_users - ) - if hs.config.limit_usage_by_mau or hs.config.mau_stats_only: - start_generate_monthly_active_users() - clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000) + generate_monthly_active_users() + clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) # End of monthly active user settings if hs.config.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) + clock.looping_call(phone_stats_home, 3 * 60 * 60 * 1000, hs, stats) # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process @@ -206,4 +195,4 @@ def start_phone_stats_home(hs): # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home) + clock.call_later(5 * 60, phone_stats_home, hs, stats) diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 239c7a949c..a25a888443 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -351,7 +351,63 @@ class ClientIpBackgroundUpdateStore(SQLBaseStore): return updated -class ClientIpStore(ClientIpBackgroundUpdateStore): +class 
ClientIpWorkerStore(ClientIpBackgroundUpdateStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + self.user_ips_max_age = hs.config.user_ips_max_age + + if hs.config.run_background_tasks and self.user_ips_max_age: + self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) + + @wrap_as_background_process("prune_old_user_ips") + async def _prune_old_user_ips(self): + """Removes entries in user IPs older than the configured period. + """ + + if self.user_ips_max_age is None: + # Nothing to do + return + + if not await self.db_pool.updates.has_completed_background_update( + "devices_last_seen" + ): + # Only start pruning if we have finished populating the devices + # last seen info. + return + + # We do a slightly funky SQL delete to ensure we don't try and delete + # too much at once (as the table may be very large from before we + # started pruning). + # + # This works by finding the max last_seen that is less than the given + # time, but has no more than N rows before it, deleting all rows with + # a lesser last_seen time. (We COALESCE so that the sub-SELECT always + # returns exactly one row). + sql = """ + DELETE FROM user_ips + WHERE last_seen <= ( + SELECT COALESCE(MAX(last_seen), -1) + FROM ( + SELECT last_seen FROM user_ips + WHERE last_seen <= ? + ORDER BY last_seen ASC + LIMIT 5000 + ) AS u + ) + """ + + timestamp = self.clock.time_msec() - self.user_ips_max_age + + def _prune_old_user_ips_txn(txn): + txn.execute(sql, (timestamp,)) + + await self.db_pool.runInteraction( + "_prune_old_user_ips", _prune_old_user_ips_txn + ) + + +class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): self.client_ip_last_seen = Cache( @@ -360,8 +416,6 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): super().__init__(database, db_conn, hs) - self.user_ips_max_age = hs.config.user_ips_max_age - # (user_id, access_token, ip,) -> (user_agent, device_id, last_seen) self._batch_row_update = {} @@ -372,9 +426,6 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): "before", "shutdown", self._update_client_ips_batch ) - if self.user_ips_max_age: - self._clock.looping_call(self._prune_old_user_ips, 5 * 1000) - async def insert_client_ip( self, user_id, access_token, ip, user_agent, device_id, now=None ): @@ -525,49 +576,3 @@ class ClientIpStore(ClientIpBackgroundUpdateStore): } for (access_token, ip), (user_agent, last_seen) in results.items() ] - - @wrap_as_background_process("prune_old_user_ips") - async def _prune_old_user_ips(self): - """Removes entries in user IPs older than the configured period. - """ - - if self.user_ips_max_age is None: - # Nothing to do - return - - if not await self.db_pool.updates.has_completed_background_update( - "devices_last_seen" - ): - # Only start pruning if we have finished populating the devices - # last seen info. - return - - # We do a slightly funky SQL delete to ensure we don't try and delete - # too much at once (as the table may be very large from before we - # started pruning). - # - # This works by finding the max last_seen that is less than the given - # time, but has no more than N rows before it, deleting all rows with - # a lesser last_seen time. (We COALESCE so that the sub-SELECT always - # returns exactly one row). - sql = """ - DELETE FROM user_ips - WHERE last_seen <= ( - SELECT COALESCE(MAX(last_seen), -1) - FROM ( - SELECT last_seen FROM user_ips - WHERE last_seen <= ? 
- ORDER BY last_seen ASC - LIMIT 5000 - ) AS u - ) - """ - - timestamp = self.clock.time_msec() - self.user_ips_max_age - - def _prune_old_user_ips_txn(txn): - txn.execute(sql, (timestamp,)) - - await self.db_pool.runInteraction( - "_prune_old_user_ips", _prune_old_user_ips_txn - ) diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 2c5a4fdbf6..0acf0617ca 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -18,7 +18,7 @@ import time from typing import Dict from synapse.metrics import GaugeBucketCollector -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.databases.main.event_push_actions import ( @@ -57,18 +57,13 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): super().__init__(database, db_conn, hs) # Read the extrems every 60 minutes - def read_forward_extremities(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "read_forward_extremities", self._read_forward_extremities - ) - - hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000) + if hs.config.run_background_tasks: + self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) # Used in _generate_user_daily_visits to keep track of progress self._last_user_visit_update = self._get_start_of_day() + @wrap_as_background_process("read_forward_extremities") async def _read_forward_extremities(self): def fetch(txn): txn.execute( @@ -274,6 +269,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore): today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0)) return today_start * 1000 + @wrap_as_background_process("generate_user_daily_visits") async def generate_user_daily_visits(self) -> None: """ Generates daily visit data for use in cohort/ retention analysis diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 16ba545740..a85867936f 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -14,14 +14,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- import logging import re from typing import Any, Dict, List, Optional, Tuple from synapse.api.constants import UserTypes from synapse.api.errors import Codes, StoreError, SynapseError, ThreepidValidationError -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import ( + run_as_background_process, + wrap_as_background_process, +) from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool from synapse.storage.types import Cursor @@ -48,6 +50,21 @@ class RegistrationWorkerStore(SQLBaseStore): database.engine, find_max_generated_user_id_localpart, "user_id_seq", ) + self._account_validity = hs.config.account_validity + if hs.config.run_background_tasks and self._account_validity.enabled: + self._clock.call_later( + 0.0, + run_as_background_process, + "account_validity_set_expiration_dates", + self._set_expiration_date_when_missing, + ) + + # Create a background job for culling expired 3PID validity tokens + if hs.config.run_background_tasks: + self.clock.looping_call( + self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS + ) + @cached() async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]: return await self.db_pool.simple_select_one( @@ -778,6 +795,78 @@ class RegistrationWorkerStore(SQLBaseStore): "delete_threepid_session", delete_threepid_session_txn ) + @wrap_as_background_process("cull_expired_threepid_validation_tokens") + async def cull_expired_threepid_validation_tokens(self) -> None: + """Remove threepid validation tokens with expiry dates that have passed""" + + def cull_expired_threepid_validation_tokens_txn(txn, ts): + sql = """ + DELETE FROM threepid_validation_token WHERE + expires < ? + """ + txn.execute(sql, (ts,)) + + await self.db_pool.runInteraction( + "cull_expired_threepid_validation_tokens", + cull_expired_threepid_validation_tokens_txn, + self.clock.time_msec(), + ) + + async def _set_expiration_date_when_missing(self): + """ + Retrieves the list of registered users that don't have an expiration date, and + adds an expiration date for each of them. + """ + + def select_users_with_no_expiration_date_txn(txn): + """Retrieves the list of registered users with no expiration date from the + database, filtering out deactivated users. + """ + sql = ( + "SELECT users.name FROM users" + " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" + " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" + ) + txn.execute(sql, []) + + res = self.db_pool.cursor_to_dict(txn) + if res: + for user in res: + self.set_expiration_date_for_user_txn( + txn, user["name"], use_delta=True + ) + + await self.db_pool.runInteraction( + "get_users_with_no_expiration_date", + select_users_with_no_expiration_date_txn, + ) + + def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): + """Sets an expiration date to the account with the given user ID. + + Args: + user_id (str): User ID to set an expiration date for. + use_delta (bool): If set to False, the expiration date for the user will be + now + validity period. If set to True, this expiration date will be a + random value in the [now + period - d ; now + period] range, d being a + delta equal to 10% of the validity period. 
+ """ + now_ms = self._clock.time_msec() + expiration_ts = now_ms + self._account_validity.period + + if use_delta: + expiration_ts = self.rand.randrange( + expiration_ts - self._account_validity.startup_job_max_delta, + expiration_ts, + ) + + self.db_pool.simple_upsert_txn( + txn, + "account_validity", + keyvalues={"user_id": user_id}, + values={"expiration_ts_ms": expiration_ts, "email_sent": False}, + ) + class RegistrationBackgroundUpdateStore(RegistrationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): @@ -911,28 +1000,8 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._account_validity = hs.config.account_validity self._ignore_unknown_session_error = hs.config.request_token_inhibit_3pid_errors - if self._account_validity.enabled: - self._clock.call_later( - 0.0, - run_as_background_process, - "account_validity_set_expiration_dates", - self._set_expiration_date_when_missing, - ) - - # Create a background job for culling expired 3PID validity tokens - def start_cull(): - # run as a background process to make sure that the database transactions - # have a logcontext to report to - return run_as_background_process( - "cull_expired_threepid_validation_tokens", - self.cull_expired_threepid_validation_tokens, - ) - - hs.get_clock().looping_call(start_cull, THIRTY_MINUTES_IN_MS) - async def add_access_token_to_user( self, user_id: str, @@ -1477,22 +1546,6 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): start_or_continue_validation_session_txn, ) - async def cull_expired_threepid_validation_tokens(self) -> None: - """Remove threepid validation tokens with expiry dates that have passed""" - - def cull_expired_threepid_validation_tokens_txn(txn, ts): - sql = """ - DELETE FROM threepid_validation_token WHERE - expires < ? - """ - txn.execute(sql, (ts,)) - - await self.db_pool.runInteraction( - "cull_expired_threepid_validation_tokens", - cull_expired_threepid_validation_tokens_txn, - self.clock.time_msec(), - ) - async def set_user_deactivated_status( self, user_id: str, deactivated: bool ) -> None: @@ -1522,61 +1575,6 @@ class RegistrationStore(RegistrationBackgroundUpdateStore): ) txn.call_after(self.is_guest.invalidate, (user_id,)) - async def _set_expiration_date_when_missing(self): - """ - Retrieves the list of registered users that don't have an expiration date, and - adds an expiration date for each of them. - """ - - def select_users_with_no_expiration_date_txn(txn): - """Retrieves the list of registered users with no expiration date from the - database, filtering out deactivated users. - """ - sql = ( - "SELECT users.name FROM users" - " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" - " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" - ) - txn.execute(sql, []) - - res = self.db_pool.cursor_to_dict(txn) - if res: - for user in res: - self.set_expiration_date_for_user_txn( - txn, user["name"], use_delta=True - ) - - await self.db_pool.runInteraction( - "get_users_with_no_expiration_date", - select_users_with_no_expiration_date_txn, - ) - - def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): - """Sets an expiration date to the account with the given user ID. - - Args: - user_id (str): User ID to set an expiration date for. - use_delta (bool): If set to False, the expiration date for the user will be - now + validity period. 
If set to True, this expiration date will be a - random value in the [now + period - d ; now + period] range, d being a - delta equal to 10% of the validity period. - """ - now_ms = self._clock.time_msec() - expiration_ts = now_ms + self._account_validity.period - - if use_delta: - expiration_ts = self.rand.randrange( - expiration_ts - self._account_validity.startup_job_max_delta, - expiration_ts, - ) - - self.db_pool.simple_upsert_txn( - txn, - "account_validity", - keyvalues={"user_id": user_id}, - values={"expiration_ts_ms": expiration_ts, "email_sent": False}, - ) - def find_max_generated_user_id_localpart(cur: Cursor) -> int: """ diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index bae1bd22d3..20fcdaa529 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -61,7 +61,10 @@ class RoomMemberWorkerStore(EventsWorkerStore): self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() - if self.hs.config.metrics_flags.known_servers: + if ( + self.hs.config.run_background_tasks + and self.hs.config.metrics_flags.known_servers + ): self._known_servers_count = 1 self.hs.get_clock().looping_call( run_as_background_process, diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 97aed1500e..7d46090267 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -19,7 +19,7 @@ from typing import Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json -from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import DatabasePool, LoggingTransaction from synapse.storage.engines import PostgresEngine, Sqlite3Engine @@ -43,15 +43,33 @@ _UpdateTransactionRow = namedtuple( SENTINEL = object() -class TransactionStore(SQLBaseStore): +class TransactionWorkerStore(SQLBaseStore): + def __init__(self, database: DatabasePool, db_conn, hs): + super().__init__(database, db_conn, hs) + + if hs.config.run_background_tasks: + self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) + + @wrap_as_background_process("cleanup_transactions") + async def _cleanup_transactions(self) -> None: + now = self._clock.time_msec() + month_ago = now - 30 * 24 * 60 * 60 * 1000 + + def _cleanup_transactions_txn(txn): + txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) + + await self.db_pool.runInteraction( + "_cleanup_transactions", _cleanup_transactions_txn + ) + + +class TransactionStore(TransactionWorkerStore): """A collection of queries for handling PDUs. 
""" def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._clock.looping_call(self._start_cleanup_transactions, 30 * 60 * 1000) - self._destination_retry_cache = ExpiringCache( cache_name="get_destination_retry_timings", clock=self._clock, @@ -266,22 +284,6 @@ class TransactionStore(SQLBaseStore): }, ) - def _start_cleanup_transactions(self): - return run_as_background_process( - "cleanup_transactions", self._cleanup_transactions - ) - - async def _cleanup_transactions(self) -> None: - now = self._clock.time_msec() - month_ago = now - 30 * 24 * 60 * 60 * 1000 - - def _cleanup_transactions_txn(txn): - txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,)) - - await self.db_pool.runInteraction( - "_cleanup_transactions", _cleanup_transactions_txn - ) - async def store_destination_rooms_entries( self, destinations: Iterable[str], room_id: str, stream_ordering: int, ) -> None: From fa8934b175467d589dd34fae18639cac0d738fc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 7 Oct 2020 15:15:57 +0100 Subject: [PATCH 130/134] Reduce serialization errors in MultiWriterIdGen (#8456) We call `_update_stream_positions_table_txn` a lot, which is an UPSERT that can conflict in `REPEATABLE READ` isolation level. Instead of doing a transaction consisting of a single query we may as well run it outside of a transaction. --- changelog.d/8456.misc | 1 + synapse/storage/database.py | 63 +++++++++++++++++++++++++-- synapse/storage/engines/_base.py | 17 ++++++++ synapse/storage/engines/postgres.py | 10 ++++- synapse/storage/engines/sqlite.py | 10 +++++ synapse/storage/util/id_generators.py | 12 ++++- tests/storage/test_base.py | 1 + 7 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 changelog.d/8456.misc diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc new file mode 100644 index 0000000000..ccd260069b --- /dev/null +++ b/changelog.d/8456.misc @@ -0,0 +1 @@ +Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 79ec8f119d..6116191b16 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -403,6 +403,24 @@ class DatabasePool: *args: Any, **kwargs: Any ) -> R: + """Start a new database transaction with the given connection. + + Note: The given func may be called multiple times under certain + failure modes. This is normally fine when in a standard transaction, + but care must be taken if the connection is in `autocommit` mode that + the function will correctly handle being aborted and retried half way + through its execution. + + Args: + conn + desc + after_callbacks + exception_callbacks + func + *args + **kwargs + """ + start = monotonic_time() txn_id = self._TXN_ID @@ -508,7 +526,12 @@ class DatabasePool: sql_txn_timer.labels(desc).observe(duration) async def runInteraction( - self, desc: str, func: "Callable[..., R]", *args: Any, **kwargs: Any + self, + desc: str, + func: "Callable[..., R]", + *args: Any, + db_autocommit: bool = False, + **kwargs: Any ) -> R: """Starts a transaction on the database and runs a given function @@ -518,6 +541,18 @@ class DatabasePool: database transaction (twisted.enterprise.adbapi.Transaction) as its first argument, followed by `args` and `kwargs`. + db_autocommit: Whether to run the function in "autocommit" mode, + i.e. outside of a transaction. This is useful for transactions + that are only a single query. 
+
+                Currently, this is only implemented for Postgres. SQLite will still
+                run the function inside a transaction.
+
+                WARNING: This means that if func fails half way through then
+                the changes will *not* be rolled back. `func` may also get
+                called multiple times if the transaction is retried, so must
+                correctly handle that case.
+
             args: positional args to pass to `func`
             kwargs: named args to pass to `func`
 
@@ -538,6 +573,7 @@
                 exception_callbacks,
                 func,
                 *args,
+                db_autocommit=db_autocommit,
                 **kwargs
             )
 
@@ -551,7 +587,11 @@
         return cast(R, result)
 
     async def runWithConnection(
-        self, func: "Callable[..., R]", *args: Any, **kwargs: Any
+        self,
+        func: "Callable[..., R]",
+        *args: Any,
+        db_autocommit: bool = False,
+        **kwargs: Any
     ) -> R:
         """Wraps the .runWithConnection() method on the underlying db_pool.
 
@@ -560,6 +600,9 @@
             database connection (twisted.enterprise.adbapi.Connection) as
             its first argument, followed by `args` and `kwargs`.
         args: positional args to pass to `func`
+        db_autocommit: Whether to run the function in "autocommit" mode,
+            i.e. outside of a transaction. This is useful for transactions
+            that are only a single query. Currently only affects postgres.
         kwargs: named args to pass to `func`
 
         Returns:
@@ -575,6 +618,13 @@
         start_time = monotonic_time()
 
         def inner_func(conn, *args, **kwargs):
+            # We shouldn't be in a transaction. If we are then something
+            # somewhere hasn't committed after doing work. (This is likely only
+            # possible during startup, as `run*` will ensure changes are
+            # committed/rolled back before putting the connection back in the
+            # pool).
+            assert not self.engine.in_transaction(conn)
+
             with LoggingContext("runWithConnection", parent_context) as context:
                 sched_duration_sec = monotonic_time() - start_time
                 sql_scheduling_timer.observe(sched_duration_sec)
@@ -584,7 +634,14 @@
                     logger.debug("Reconnecting closed database connection")
                     conn.reconnect()
 
-                return func(conn, *args, **kwargs)
+                try:
+                    if db_autocommit:
+                        self.engine.attempt_to_set_autocommit(conn, True)
+
+                    return func(conn, *args, **kwargs)
+                finally:
+                    if db_autocommit:
+                        self.engine.attempt_to_set_autocommit(conn, False)
 
         return await make_deferred_yieldable(
             self._db_pool.runWithConnection(inner_func, *args, **kwargs)
diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py
index 908cbc79e3..d6d632dc10 100644
--- a/synapse/storage/engines/_base.py
+++ b/synapse/storage/engines/_base.py
@@ -97,3 +97,20 @@ class BaseDatabaseEngine(Generic[ConnectionType], metaclass=abc.ABCMeta):
         """Gets a string giving the server version. For example: '3.22.0'
         """
         ...
+
+    @abc.abstractmethod
+    def in_transaction(self, conn: Connection) -> bool:
+        """Whether the connection is currently in a transaction.
+        """
+        ...
+
+    @abc.abstractmethod
+    def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool):
+        """Attempt to set the connection's autocommit mode.
+
+        When True, queries are run outside of transactions.
+
+        Note: This has no effect on SQLite3, so callers still need to
+        commit/rollback the connections.
+        """
+        ...
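The Postgres implementations of these two hooks, in the next diff, map directly onto psycopg2 primitives. A minimal standalone sketch of that behaviour, assuming a plain psycopg2 connection (the DSN is a placeholder):

    import psycopg2
    from psycopg2.extensions import STATUS_READY

    conn = psycopg2.connect("dbname=synapse")  # placeholder DSN

    # A fresh connection has no transaction open; this is exactly the
    # check that in_transaction() performs.
    assert conn.status == STATUS_READY

    conn.set_session(autocommit=True)   # attempt_to_set_autocommit(conn, True)
    cur = conn.cursor()
    cur.execute("SELECT 1")             # runs and commits immediately
    assert conn.status == STATUS_READY  # still outside a transaction

    conn.set_session(autocommit=False)  # restore transactional behaviour
    cur.execute("SELECT 1")             # implicitly opens a transaction...
    assert conn.status != STATUS_READY  # ...which in_transaction() now reports
    conn.rollback()

Checking `conn.status` is a local attribute read rather than a query, which is what keeps the new assertion in `runWithConnection` cheap.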
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index ff39281f85..7719ac32f7 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -15,7 +15,8 @@ import logging -from ._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.engines._base import BaseDatabaseEngine, IncorrectDatabaseSetup +from synapse.storage.types import Connection logger = logging.getLogger(__name__) @@ -119,6 +120,7 @@ class PostgresEngine(BaseDatabaseEngine): cursor.execute("SET synchronous_commit TO OFF") cursor.close() + db_conn.commit() @property def can_native_upsert(self): @@ -171,3 +173,9 @@ class PostgresEngine(BaseDatabaseEngine): return "%i.%i" % (numver / 10000, numver % 10000) else: return "%i.%i.%i" % (numver / 10000, (numver % 10000) / 100, numver % 100) + + def in_transaction(self, conn: Connection) -> bool: + return conn.status != self.module.extensions.STATUS_READY # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + return conn.set_session(autocommit=autocommit) # type: ignore diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 8a0f8c89d1..5db0f0b520 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -17,6 +17,7 @@ import threading import typing from synapse.storage.engines import BaseDatabaseEngine +from synapse.storage.types import Connection if typing.TYPE_CHECKING: import sqlite3 # noqa: F401 @@ -86,6 +87,7 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") + db_conn.commit() def is_deadlock(self, error): return False @@ -105,6 +107,14 @@ class Sqlite3Engine(BaseDatabaseEngine["sqlite3.Connection"]): """ return "%i.%i.%i" % self.module.sqlite_version_info + def in_transaction(self, conn: Connection) -> bool: + return conn.in_transaction # type: ignore + + def attempt_to_set_autocommit(self, conn: Connection, autocommit: bool): + # Twisted doesn't let us set attributes on the connections, so we can't + # set the connection to autocommit mode. + pass + # Following functions taken from: https://github.com/coleifer/peewee diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 48efbb5067..ad017207aa 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -24,6 +24,7 @@ from typing_extensions import Deque from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator logger = logging.getLogger(__name__) @@ -552,7 +553,7 @@ class MultiWriterIdGenerator: # do. break - def _update_stream_positions_table_txn(self, txn): + def _update_stream_positions_table_txn(self, txn: Cursor): """Update the `stream_positions` table with newly persisted position. """ @@ -602,10 +603,13 @@ class _MultiWriterCtxManager: stream_ids = attr.ib(type=List[int], factory=list) async def __aenter__(self) -> Union[int, List[int]]: + # It's safe to run this in autocommit mode as fetching values from a + # sequence ignores transaction semantics anyway. 
self.stream_ids = await self.id_gen._db.runInteraction( "_load_next_mult_id", self.id_gen._load_next_mult_id_txn, self.multiple_ids or 1, + db_autocommit=True, ) # Assert the fetched ID is actually greater than any ID we've already @@ -636,10 +640,16 @@ class _MultiWriterCtxManager: # # We only do this on the success path so that the persisted current # position points to a persisted row with the correct instance name. + # + # We do this in autocommit mode as a) the upsert works correctly outside + # transactions and b) reduces the amount of time the rows are locked + # for. If we don't do this then we'll often hit serialization errors due + # to the fact we default to REPEATABLE READ isolation levels. if self.id_gen._writers: await self.id_gen._db.runInteraction( "MultiWriterIdGenerator._update_table", self.id_gen._update_stream_positions_table_txn, + db_autocommit=True, ) return False diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index 40ba652248..eac7e4dcd2 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,6 +56,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): engine = create_engine(sqlite_config) fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False + fake_engine.in_transaction.return_value = False db = DatabasePool(Mock(), Mock(config=sqlite_config), fake_engine) db._db_pool = self.db_pool From b28bfd905d51e02785979abaabf4debaf817f054 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 8 Oct 2020 11:10:15 +0100 Subject: [PATCH 131/134] Clarify error message when plugin config parsers raise an error (#8492) This turns: Failed to parse config for 'myplugin': Exception('error message') into: Failed to parse config for 'myplugin': error message. --- changelog.d/8492.misc | 1 + synapse/util/module_loader.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/8492.misc diff --git a/changelog.d/8492.misc b/changelog.d/8492.misc new file mode 100644 index 0000000000..a344aee791 --- /dev/null +++ b/changelog.d/8492.misc @@ -0,0 +1 @@ +Clarify error message when plugin config parsers raise an error. diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index bb62db4637..94b59afb38 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -36,7 +36,7 @@ def load_module(provider): try: provider_config = provider_class.parse_config(provider.get("config")) except Exception as e: - raise ConfigError("Failed to parse config for %r: %r" % (provider["module"], e)) + raise ConfigError("Failed to parse config for %r: %s" % (provider["module"], e)) return provider_class, provider_config From 719474cae0f9fd958cef1279ad559866e184e9e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateusz=20Przyby=C5=82owicz?= Date: Thu, 8 Oct 2020 12:16:56 +0200 Subject: [PATCH 132/134] Add useful shields to readme (#8493) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added shields directing to synapse-dev room, showing license, latest version on PyPi and supported Python versions. I've moved substitution definitions to the bottom to improve readability. 
Signed-off-by: Mateusz Przybyłowicz
---
 README.rst           | 30 +++++++++++++++++++++++-------
 changelog.d/8493.doc |  1 +
 2 files changed, 24 insertions(+), 7 deletions(-)
 create mode 100644 changelog.d/8493.doc

diff --git a/README.rst b/README.rst
index e623cf863a..d609b4b62e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,10 +1,6 @@
-================
-Synapse |shield|
-================
-
-.. |shield| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
-  :alt: (get support on #synapse:matrix.org)
-  :target: https://matrix.to/#/#synapse:matrix.org
+=========================================================
+Synapse |support| |development| |license| |pypi| |python|
+=========================================================
 
 .. contents::
 
@@ -374,3 +370,23 @@ something like the following in their logs::
 
 This is normally caused by a misconfiguration in your reverse-proxy. See
 ``_ and double-check that your settings are correct.
+
+.. |support| image:: https://img.shields.io/matrix/synapse:matrix.org?label=support&logo=matrix
+  :alt: (get support on #synapse:matrix.org)
+  :target: https://matrix.to/#/#synapse:matrix.org
+
+.. |development| image:: https://img.shields.io/matrix/synapse-dev:matrix.org?label=development&logo=matrix
+  :alt: (discuss development on #synapse-dev:matrix.org)
+  :target: https://matrix.to/#/#synapse-dev:matrix.org
+
+.. |license| image:: https://img.shields.io/github/license/matrix-org/synapse
+  :alt: (check license in LICENSE file)
+  :target: LICENSE
+
+.. |pypi| image:: https://img.shields.io/pypi/v/matrix-synapse
+  :alt: (latest version released on PyPi)
+  :target: https://pypi.org/project/matrix-synapse
+
+.. |python| image:: https://img.shields.io/pypi/pyversions/matrix-synapse
+  :alt: (supported python versions)
+  :target: https://pypi.org/project/matrix-synapse
diff --git a/changelog.d/8493.doc b/changelog.d/8493.doc
new file mode 100644
index 0000000000..26797cd99e
--- /dev/null
+++ b/changelog.d/8493.doc
@@ -0,0 +1 @@
+Improve readme by adding new shield.io badges.

From 31fe46e0a3fc0aaa5a45c798cb33ce2d1f4accfc Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 8 Oct 2020 11:19:22 +0100
Subject: [PATCH 133/134] 1.21.0rc3

---
 CHANGES.md            | 15 +++++++++++++++
 changelog.d/8456.misc |  1 -
 changelog.d/8475.misc |  1 -
 synapse/__init__.py   |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)
 delete mode 100644 changelog.d/8456.misc
 delete mode 100644 changelog.d/8475.misc

diff --git a/CHANGES.md b/CHANGES.md
index 5d4e80499e..5d977d2aad 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+Synapse 1.21.0rc3 (2020-10-08)
+==============================
+
+Bugfixes
+--------
+
+- Fix duplication of events on high traffic servers, caused by PostgresQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456))
+
+
+Internal Changes
+----------------
+
+- Add Groovy Gorilla to the list of distributions we build `.deb`s for. ([\#8475](https://github.com/matrix-org/synapse/issues/8475))
+
+
 Synapse 1.21.0rc2 (2020-10-02)
 ==============================
 
diff --git a/changelog.d/8456.misc b/changelog.d/8456.misc
deleted file mode 100644
index ccd260069b..0000000000
--- a/changelog.d/8456.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce number of serialization errors of `MultiWriterIdGenerator._update_table`.
diff --git a/changelog.d/8475.misc b/changelog.d/8475.misc deleted file mode 100644 index 69bcb04097..0000000000 --- a/changelog.d/8475.misc +++ /dev/null @@ -1 +0,0 @@ -Add Groovy Gorilla to the list of distributions we build `.deb`s for. diff --git a/synapse/__init__.py b/synapse/__init__.py index 500558bbdf..a86dc07ddc 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ try: except ImportError: pass -__version__ = "1.21.0rc2" +__version__ = "1.21.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From b9c253a724aaf2798c2d0b089d9250059d24aac9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Oct 2020 11:30:05 +0100 Subject: [PATCH 134/134] Update change log --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 5d977d2aad..dfdd8aa68a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.21.0rc3 (2020-10-08) Bugfixes -------- -- Fix duplication of events on high traffic servers, caused by PostgresQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456)) +- Fix duplication of events on high traffic servers, caused by PostgreSQL `could not serialize access due to concurrent update` errors. ([\#8456](https://github.com/matrix-org/synapse/issues/8456)) Internal Changes