Use direct references for some configuration variables (#10798)

Instead of proxying through the magic getter of the RootConfig
object. This should be more performant (and is more explicit).
Patrick Cloke 2021-09-13 13:07:12 -04:00 committed by GitHub
parent 9f111075e8
commit 01c88a09cd
66 changed files with 152 additions and 133 deletions
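
To make the intent of the mechanical changes below concrete, here is a minimal, hypothetical sketch of the two access styles (this is not Synapse's actual RootConfig implementation; the section classes and the values are invented for illustration). Proxying through a root-level magic getter means every attribute lookup has to search the config sections, whereas a direct reference such as config.server.server_name names the owning section explicitly and costs a single attribute lookup:

class ServerConfig:
    def __init__(self) -> None:
        self.server_name = "example.com"
        self.public_baseurl = "https://example.com/"


class WorkerConfig:
    def __init__(self) -> None:
        self.worker_app = None
        self.run_background_tasks = True


class RootConfig:
    def __init__(self) -> None:
        self.server = ServerConfig()
        self.worker = WorkerConfig()

    def __getattr__(self, item: str):
        # "Magic getter": only called when normal lookup fails; it scans the
        # sections until one of them has the requested attribute.
        for section in (self.server, self.worker):
            if hasattr(section, item):
                return getattr(section, item)
        raise AttributeError(item)


config = RootConfig()

# Old style: relies on RootConfig.__getattr__ to find the owning section.
print(config.server_name)          # -> "example.com"

# New style (what this commit switches to): explicit, and a plain lookup.
print(config.server.server_name)   # -> "example.com"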

changelog.d/10798.misc (new file)

@@ -0,0 +1 @@
+Use direct references to config flags.

@@ -41,11 +41,11 @@ class ConsentURIBuilder:
        """
        if hs_config.form_secret is None:
            raise ConfigError("form_secret not set in config")
-       if hs_config.public_baseurl is None:
+       if hs_config.server.public_baseurl is None:
            raise ConfigError("public_baseurl not set in config")
        self._hmac_secret = hs_config.form_secret.encode("utf-8")
-       self._public_baseurl = hs_config.public_baseurl
+       self._public_baseurl = hs_config.server.public_baseurl

    def build_user_consent_uri(self, user_id):
        """Build a URI which we can give to the user to do their privacy

@@ -82,7 +82,7 @@ def start_worker_reactor(appname, config, run_command=reactor.run):
        run_command (Callable[]): callable that actually runs the reactor
    """
-   logger = logging.getLogger(config.worker_app)
+   logger = logging.getLogger(config.worker.worker_app)

    start_reactor(
        appname,

@@ -398,7 +398,7 @@ async def start(hs: "HomeServer"):
    # If background tasks are running on the main process, start collecting the
    # phone home stats.
-   if hs.config.run_background_tasks:
+   if hs.config.worker.run_background_tasks:
        start_phone_stats_home(hs)

    # We now freeze all allocated objects in the hopes that (almost)

@@ -433,9 +433,13 @@ def setup_sentry(hs):
    # We set some default tags that give some context to this instance
    with sentry_sdk.configure_scope() as scope:
-       scope.set_tag("matrix_server_name", hs.config.server_name)
+       scope.set_tag("matrix_server_name", hs.config.server.server_name)

-       app = hs.config.worker_app if hs.config.worker_app else "synapse.app.homeserver"
+       app = (
+           hs.config.worker.worker_app
+           if hs.config.worker.worker_app
+           else "synapse.app.homeserver"
+       )
        name = hs.get_instance_name()
        scope.set_tag("worker_app", app)
        scope.set_tag("worker_name", name)

@@ -178,12 +178,12 @@ def start(config_options):
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

-   if config.worker_app is not None:
-       assert config.worker_app == "synapse.app.admin_cmd"
+   if config.worker.worker_app is not None:
+       assert config.worker.worker_app == "synapse.app.admin_cmd"

    # Update the config with some basic overrides so that don't have to specify
    # a full worker config.
-   config.worker_app = "synapse.app.admin_cmd"
+   config.worker.worker_app = "synapse.app.admin_cmd"

    if (
        not config.worker_daemonize

@@ -196,7 +196,7 @@ def start(config_options):
    # Explicitly disable background processes
    config.update_user_directory = False
-   config.run_background_tasks = False
+   config.worker.run_background_tasks = False
    config.start_pushers = False
    config.pusher_shard_config.instances = []
    config.send_federation = False

@@ -205,7 +205,7 @@ def start(config_options):
    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts

    ss = AdminCmdServer(
-       config.server_name,
+       config.server.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

@@ -416,7 +416,7 @@ def start(config_options):
        sys.exit(1)

    # For backwards compatibility let any of the old app names.
-   assert config.worker_app in (
+   assert config.worker.worker_app in (
        "synapse.app.appservice",
        "synapse.app.client_reader",
        "synapse.app.event_creator",

@@ -430,7 +430,7 @@ def start(config_options):
        "synapse.app.user_dir",
    )

-   if config.worker_app == "synapse.app.appservice":
+   if config.worker.worker_app == "synapse.app.appservice":
        if config.appservice.notify_appservices:
            sys.stderr.write(
                "\nThe appservices must be disabled in the main synapse process"

@@ -446,7 +446,7 @@ def start(config_options):
        # For other worker types we force this to off.
        config.appservice.notify_appservices = False

-   if config.worker_app == "synapse.app.user_dir":
+   if config.worker.worker_app == "synapse.app.user_dir":
        if config.server.update_user_directory:
            sys.stderr.write(
                "\nThe update_user_directory must be disabled in the main synapse process"

@@ -469,7 +469,7 @@ def start(config_options):
    synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = GenericWorkerServer(
-       config.server_name,
+       config.server.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

@@ -350,7 +350,7 @@ def setup(config_options):
    synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = SynapseHomeServer(
-       config.server_name,
+       config.server.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

@@ -73,7 +73,7 @@ async def phone_stats_home(hs, stats, stats_process=_stats_process):
    store = hs.get_datastore()

-   stats["homeserver"] = hs.config.server_name
+   stats["homeserver"] = hs.config.server.server_name
    stats["server_context"] = hs.config.server_context
    stats["timestamp"] = now
    stats["uptime_seconds"] = uptime

@@ -223,7 +223,7 @@ def _setup_stdlib_logging(config, log_config_path, logBeginner: LogBeginner) ->
    # writes.
    log_context_filter = LoggingContextFilter()
-   log_metadata_filter = MetadataFilter({"server_name": config.server_name})
+   log_metadata_filter = MetadataFilter({"server_name": config.server.server_name})
    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):

@@ -335,5 +335,5 @@ def setup_logging(
    # Log immediately so we can grep backwards.
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
-   logging.info("Server hostname: %s", config.server_name)
+   logging.info("Server hostname: %s", config.server.server_name)
    logging.info("Instance name: %s", hs.get_instance_name())

@@ -88,7 +88,7 @@ class EventValidator:
            self._validate_retention(event)

        if event.type == EventTypes.ServerACL:
-           if not server_matches_acl_event(config.server_name, event):
+           if not server_matches_acl_event(config.server.server_name, event):
                raise SynapseError(
                    400, "Can't create an ACL event that denies the local server"
                )

@@ -281,7 +281,8 @@ class FederationSender(AbstractFederationSender):
        self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {}

        self._rr_txn_interval_per_room_ms = (
-           1000.0 / hs.config.federation_rr_transactions_per_room_per_second
+           1000.0
+           / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second
        )

        # wake up destinations that have outstanding PDUs to be caught up

@@ -144,7 +144,7 @@ class GroupAttestionRenewer:
        self.is_mine_id = hs.is_mine_id
        self.attestations = hs.get_groups_attestation_signing()

-       if not hs.config.worker_app:
+       if not hs.config.worker.worker_app:
            self._renew_attestations_loop = self.clock.looping_call(
                self._start_renew_attestations, 30 * 60 * 1000
            )

@@ -45,16 +45,16 @@ class BaseHandler:
        self.request_ratelimiter = Ratelimiter(
            store=self.store, clock=self.clock, rate_hz=0, burst_count=0
        )
-       self._rc_message = self.hs.config.rc_message
+       self._rc_message = self.hs.config.ratelimiting.rc_message

        # Check whether ratelimiting room admin message redaction is enabled
        # by the presence of rate limits in the config
-       if self.hs.config.rc_admin_redaction:
+       if self.hs.config.ratelimiting.rc_admin_redaction:
            self.admin_redaction_ratelimiter: Optional[Ratelimiter] = Ratelimiter(
                store=self.store,
                clock=self.clock,
-               rate_hz=self.hs.config.rc_admin_redaction.per_second,
-               burst_count=self.hs.config.rc_admin_redaction.burst_count,
+               rate_hz=self.hs.config.ratelimiting.rc_admin_redaction.per_second,
+               burst_count=self.hs.config.ratelimiting.rc_admin_redaction.burst_count,
            )
        else:
            self.admin_redaction_ratelimiter = None

@@ -78,7 +78,7 @@ class AccountValidityHandler:
            )

            # Check the renewal emails to send and send them every 30min.
-           if hs.config.run_background_tasks:
+           if hs.config.worker.run_background_tasks:
                self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000)

        self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = []

@@ -249,7 +249,7 @@ class AccountValidityHandler:
        renewal_token = await self._get_renewal_token(user_id)
        url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
-           self.hs.config.public_baseurl,
+           self.hs.config.server.public_baseurl,
            renewal_token,
        )

@@ -244,8 +244,8 @@ class AuthHandler(BaseHandler):
        self._failed_uia_attempts_ratelimiter = Ratelimiter(
            store=self.store,
            clock=self.clock,
-           rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
-           burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+           rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
+           burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
        )

        # The number of seconds to keep a UI auth session active.

@@ -255,14 +255,14 @@ class AuthHandler(BaseHandler):
        self._failed_login_attempts_ratelimiter = Ratelimiter(
            store=self.store,
            clock=hs.get_clock(),
-           rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
-           burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+           rate_hz=self.hs.config.ratelimiting.rc_login_failed_attempts.per_second,
+           burst_count=self.hs.config.ratelimiting.rc_login_failed_attempts.burst_count,
        )

        self._clock = self.hs.get_clock()

        # Expire old UI auth sessions after a period of time.
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(
                run_as_background_process,
                5 * 60 * 1000,

@@ -289,7 +289,7 @@ class AuthHandler(BaseHandler):
            hs.config.sso_account_deactivated_template
        )

-       self._server_name = hs.config.server_name
+       self._server_name = hs.config.server.server_name

        # cast to tuple for use with str.startswith
        self._whitelisted_sso_clients = tuple(hs.config.sso_client_whitelist)

@@ -749,7 +749,7 @@ class AuthHandler(BaseHandler):
                    "name": self.hs.config.user_consent_policy_name,
                    "url": "%s_matrix/consent?v=%s"
                    % (
-                       self.hs.config.public_baseurl,
+                       self.hs.config.server.public_baseurl,
                        self.hs.config.user_consent_version,
                    ),
                },

@@ -1799,7 +1799,7 @@ class MacaroonGenerator:
    def _generate_base_macaroon(self, user_id: str) -> pymacaroons.Macaroon:
        macaroon = pymacaroons.Macaroon(
-           location=self.hs.config.server_name,
+           location=self.hs.config.server.server_name,
            identifier="key",
            key=self.hs.config.macaroon_secret_key,
        )

@@ -46,7 +46,7 @@ class DeactivateAccountHandler(BaseHandler):
        # Start the user parter loop so it can resume parting users from rooms where
        # it left off (if it has work left to do).
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            hs.get_reactor().callWhenRunning(self._start_user_parting)

        self._account_validity_enabled = (

@@ -84,8 +84,8 @@ class DeviceMessageHandler:
        self._ratelimiter = Ratelimiter(
            store=self.store,
            clock=hs.get_clock(),
-           rate_hz=hs.config.rc_key_requests.per_second,
-           burst_count=hs.config.rc_key_requests.burst_count,
+           rate_hz=hs.config.ratelimiting.rc_key_requests.per_second,
+           burst_count=hs.config.ratelimiting.rc_key_requests.burst_count,
        )

    async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None:

@@ -57,7 +57,7 @@ class E2eKeysHandler:
        federation_registry = hs.get_federation_registry()

-       self._is_master = hs.config.worker_app is None
+       self._is_master = hs.config.worker.worker_app is None
        if not self._is_master:
            self._user_device_resync_client = (
                ReplicationUserDevicesResyncRestServlet.make_client(hs)

@@ -101,7 +101,7 @@ class FederationHandler(BaseHandler):
            hs
        )

-       if hs.config.worker_app:
+       if hs.config.worker.worker_app:
            self._maybe_store_room_on_outlier_membership = (
                ReplicationStoreRoomOnOutlierMembershipRestServlet.make_client(hs)
            )

@@ -1614,7 +1614,7 @@ class FederationHandler(BaseHandler):
        Args:
            room_id
        """
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            await self._clean_room_for_join_client(room_id)
        else:
            await self.store.clean_room_for_join(room_id)

@@ -149,7 +149,7 @@ class FederationEventHandler:
        self._ephemeral_messages_enabled = hs.config.server.enable_ephemeral_messages

        self._send_events = ReplicationFederationSendEventsRestServlet.make_client(hs)
-       if hs.config.worker_app:
+       if hs.config.worker.worker_app:
            self._user_device_resync = (
                ReplicationUserDevicesResyncRestServlet.make_client(hs)
            )

@@ -1009,7 +1009,7 @@ class FederationEventHandler:
            await self._store.mark_remote_user_device_cache_as_stale(sender)

            # Immediately attempt a resync in the background
-           if self._config.worker_app:
+           if self._config.worker.worker_app:
                await self._user_device_resync(user_id=sender)
            else:
                await self._device_list_updater.user_device_resync(sender)

@@ -540,13 +540,13 @@ class IdentityHandler(BaseHandler):
        # It is already checked that public_baseurl is configured since this code
        # should only be used if account_threepid_delegate_msisdn is true.
-       assert self.hs.config.public_baseurl
+       assert self.hs.config.server.public_baseurl

        # we need to tell the client to send the token back to us, since it doesn't
        # otherwise know where to send it, so add submit_url response parameter
        # (see also MSC2078)
        data["submit_url"] = (
-           self.hs.config.public_baseurl
+           self.hs.config.server.public_baseurl
            + "_matrix/client/unstable/add_threepid/msisdn/submit_token"
        )
        return data

@@ -84,7 +84,7 @@ class MessageHandler:
        # scheduled.
        self._scheduled_expiry: Optional[IDelayedCall] = None

-       if not hs.config.worker_app:
+       if not hs.config.worker.worker_app:
            run_as_background_process(
                "_schedule_next_expiry", self._schedule_next_expiry
            )

@@ -461,7 +461,7 @@ class EventCreationHandler:
        self._dummy_events_threshold = hs.config.dummy_events_threshold

        if (
-           self.config.run_background_tasks
+           self.config.worker.run_background_tasks
            and self.config.cleanup_extremities_with_dummy_events
        ):
            self.clock.looping_call(

@@ -324,7 +324,7 @@ class OidcProvider:
        self._allow_existing_users = provider.allow_existing_users

        self._http_client = hs.get_proxied_http_client()
-       self._server_name: str = hs.config.server_name
+       self._server_name: str = hs.config.server.server_name

        # identifier for the external_ids table
        self.idp_id = provider.idp_id

@@ -91,7 +91,7 @@ class PaginationHandler:
        self._retention_allowed_lifetime_min = hs.config.retention_allowed_lifetime_min
        self._retention_allowed_lifetime_max = hs.config.retention_allowed_lifetime_max

-       if hs.config.run_background_tasks and hs.config.retention_enabled:
+       if hs.config.worker.run_background_tasks and hs.config.retention_enabled:
            # Run the purge jobs described in the configuration file.
            for job in hs.config.retention_purge_jobs:
                logger.info("Setting up purge job with config: %s", job)

@@ -63,7 +63,7 @@ class ProfileHandler(BaseHandler):
        self.user_directory_handler = hs.get_user_directory_handler()

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self.clock.looping_call(
                self._update_remote_profile_cache, self.PROFILE_UPDATE_MS
            )

@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)
class ReadMarkerHandler(BaseHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)
-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name
        self.store = hs.get_datastore()
        self.account_data_handler = hs.get_account_data_handler()
        self.read_marker_linearizer = Linearizer(name="read_marker")

@@ -29,7 +29,7 @@ class ReceiptsHandler(BaseHandler):
    def __init__(self, hs: "HomeServer"):
        super().__init__(hs)

-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name
        self.store = hs.get_datastore()
        self.event_auth_handler = hs.get_event_auth_handler()

@@ -102,7 +102,7 @@ class RegistrationHandler(BaseHandler):
        self.spam_checker = hs.get_spam_checker()

-       if hs.config.worker_app:
+       if hs.config.worker.worker_app:
            self._register_client = ReplicationRegisterServlet.make_client(hs)
            self._register_device_client = RegisterDeviceReplicationServlet.make_client(
                hs

@@ -696,7 +696,7 @@ class RegistrationHandler(BaseHandler):
            address: the IP address used to perform the registration.
            shadow_banned: Whether to shadow-ban the user
        """
-       if self.hs.config.worker_app:
+       if self.hs.config.worker.worker_app:
            await self._register_client(
                user_id=user_id,
                password_hash=password_hash,

@@ -786,7 +786,7 @@ class RegistrationHandler(BaseHandler):
        Does the bits that need doing on the main process. Not for use outside this
        class and RegisterDeviceReplicationServlet.
        """
-       assert not self.hs.config.worker_app
+       assert not self.hs.config.worker.worker_app

        valid_until_ms = None
        if self.session_lifetime is not None:
            if is_guest:

@@ -843,7 +843,7 @@ class RegistrationHandler(BaseHandler):
        """
        # TODO: 3pid registration can actually happen on the workers. Consider
        # refactoring it.
-       if self.hs.config.worker_app:
+       if self.hs.config.worker.worker_app:
            await self._post_registration_client(
                user_id=user_id, auth_result=auth_result, access_token=access_token
            )

@@ -54,7 +54,7 @@ class StatsHandler:
        # Guard to ensure we only process deltas one at a time
        self._is_processing = False

-       if self.stats_enabled and hs.config.run_background_tasks:
+       if self.stats_enabled and hs.config.worker.run_background_tasks:
            self.notifier.add_replication_callback(self.notify_new_event)

            # We kick this off so that we don't have to wait for a change before

@@ -53,7 +53,7 @@ class FollowerTypingHandler:
    def __init__(self, hs: "HomeServer"):
        self.store = hs.get_datastore()
-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name
        self.clock = hs.get_clock()
        self.is_mine_id = hs.is_mine_id

@@ -383,7 +383,7 @@ def init_tracer(hs: "HomeServer"):
    config = JaegerConfig(
        config=hs.config.jaeger_config,
-       service_name=f"{hs.config.server_name} {hs.get_instance_name()}",
+       service_name=f"{hs.config.server.server_name} {hs.get_instance_name()}",
        scope_manager=LogContextScopeManager(hs.config),
        metrics_factory=PrometheusMetricsFactory(),
    )

@@ -178,7 +178,7 @@ class ModuleApi:
    @property
    def public_baseurl(self) -> str:
        """The configured public base URL for this homeserver."""
-       return self._hs.config.public_baseurl
+       return self._hs.config.server.public_baseurl

    @property
    def email_app_name(self) -> str:

@@ -640,7 +640,7 @@ class ModuleApi:
        if desc is None:
            desc = f.__name__

-       if self._hs.config.run_background_tasks or run_on_all_instances:
+       if self._hs.config.worker.run_background_tasks or run_on_all_instances:
            self._clock.looping_call(
                run_as_background_process,
                msec,

@@ -130,7 +130,7 @@ class Mailer:
        """
        params = {"token": token, "client_secret": client_secret, "sid": sid}
        link = (
-           self.hs.config.public_baseurl
+           self.hs.config.server.public_baseurl
            + "_synapse/client/password_reset/email/submit_token?%s"
            % urllib.parse.urlencode(params)
        )

@@ -140,7 +140,7 @@ class Mailer:
        await self.send_email(
            email_address,
            self.email_subjects.password_reset
-           % {"server_name": self.hs.config.server_name},
+           % {"server_name": self.hs.config.server.server_name},
            template_vars,
        )

@@ -160,7 +160,7 @@ class Mailer:
        """
        params = {"token": token, "client_secret": client_secret, "sid": sid}
        link = (
-           self.hs.config.public_baseurl
+           self.hs.config.server.public_baseurl
            + "_matrix/client/unstable/registration/email/submit_token?%s"
            % urllib.parse.urlencode(params)
        )

@@ -170,7 +170,7 @@ class Mailer:
        await self.send_email(
            email_address,
            self.email_subjects.email_validation
-           % {"server_name": self.hs.config.server_name},
+           % {"server_name": self.hs.config.server.server_name},
            template_vars,
        )

@@ -191,7 +191,7 @@ class Mailer:
        """
        params = {"token": token, "client_secret": client_secret, "sid": sid}
        link = (
-           self.hs.config.public_baseurl
+           self.hs.config.server.public_baseurl
            + "_matrix/client/unstable/add_threepid/email/submit_token?%s"
            % urllib.parse.urlencode(params)
        )

@@ -201,7 +201,7 @@ class Mailer:
        await self.send_email(
            email_address,
            self.email_subjects.email_validation
-           % {"server_name": self.hs.config.server_name},
+           % {"server_name": self.hs.config.server.server_name},
            template_vars,
        )

@@ -852,7 +852,7 @@ class Mailer:
        # XXX: make r0 once API is stable
        return "%s_matrix/client/unstable/pushers/remove?%s" % (
-           self.hs.config.public_baseurl,
+           self.hs.config.server.public_baseurl,
            urllib.parse.urlencode(params),
        )

@@ -73,7 +73,7 @@ class DirectTcpReplicationClientFactory(ReconnectingClientFactory):
    ):
        self.client_name = client_name
        self.command_handler = command_handler
-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name
        self.hs = hs
        self._clock = hs.get_clock()  # As self.clock is defined in super class

@@ -168,7 +168,7 @@ class ReplicationCommandHandler:
                continue

            # Only add any other streams if we're on master.
-           if hs.config.worker_app is not None:
+           if hs.config.worker.worker_app is not None:
                continue

            if stream.NAME == FederationStream.NAME and hs.config.send_federation:

@@ -222,7 +222,7 @@ class ReplicationCommandHandler:
            },
        )

-       self._is_master = hs.config.worker_app is None
+       self._is_master = hs.config.worker.worker_app is None

        self._federation_sender = None
        if self._is_master and not hs.config.send_federation:

@@ -40,7 +40,7 @@ class ReplicationStreamProtocolFactory(Factory):
    def __init__(self, hs):
        self.command_handler = hs.get_tcp_replication()
        self.clock = hs.get_clock()
-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name

        # If we've created a `ReplicationStreamProtocolFactory` then we're
        # almost certainly registering a replication listener, so let's ensure

@@ -42,7 +42,7 @@ class FederationStream(Stream):
    ROW_TYPE = FederationStreamRow

    def __init__(self, hs: "HomeServer"):
-       if hs.config.worker_app is None:
+       if hs.config.worker.worker_app is None:
            # master process: get updates from the FederationRemoteSendQueue.
            # (if the master is configured to send federation itself, federation_sender
            # will be a real FederationSender, which has stubs for current_token and

@@ -247,7 +247,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    RegistrationTokenRestServlet(hs).register(http_server)

    # Some servlets only get registered for the main process.
-   if hs.config.worker_app is None:
+   if hs.config.worker.worker_app is None:
        SendServerNoticeServlet(hs).register(http_server)

@@ -68,7 +68,10 @@ class AuthRestServlet(RestServlet):
            html = self.terms_template.render(
                session=session,
                terms_url="%s_matrix/consent?v=%s"
-               % (self.hs.config.public_baseurl, self.hs.config.user_consent_version),
+               % (
+                   self.hs.config.server.public_baseurl,
+                   self.hs.config.user_consent_version,
+               ),
                myurl="%s/r0/auth/%s/fallback/web"
                % (CLIENT_API_PREFIX, LoginType.TERMS),
            )

@@ -135,7 +138,7 @@ class AuthRestServlet(RestServlet):
                session=session,
                terms_url="%s_matrix/consent?v=%s"
                % (
-                   self.hs.config.public_baseurl,
+                   self.hs.config.server.public_baseurl,
                    self.hs.config.user_consent_version,
                ),
                myurl="%s/r0/auth/%s/fallback/web"

@@ -93,14 +93,14 @@ class LoginRestServlet(RestServlet):
        self._address_ratelimiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=hs.get_clock(),
-           rate_hz=self.hs.config.rc_login_address.per_second,
-           burst_count=self.hs.config.rc_login_address.burst_count,
+           rate_hz=self.hs.config.ratelimiting.rc_login_address.per_second,
+           burst_count=self.hs.config.ratelimiting.rc_login_address.burst_count,
        )
        self._account_ratelimiter = Ratelimiter(
            store=hs.get_datastore(),
            clock=hs.get_clock(),
-           rate_hz=self.hs.config.rc_login_account.per_second,
-           burst_count=self.hs.config.rc_login_account.burst_count,
+           rate_hz=self.hs.config.ratelimiting.rc_login_account.per_second,
+           burst_count=self.hs.config.ratelimiting.rc_login_account.burst_count,
        )

        # ensure the CAS/SAML/OIDC handlers are loaded on this worker instance.

@@ -486,7 +486,7 @@ class SsoRedirectServlet(RestServlet):
        # register themselves with the main SSOHandler.
        _load_sso_handlers(hs)
        self._sso_handler = hs.get_sso_handler()
-       self._public_baseurl = hs.config.public_baseurl
+       self._public_baseurl = hs.config.server.public_baseurl

    async def on_GET(
        self, request: SynapseRequest, idp_id: Optional[str] = None

@@ -69,7 +69,7 @@ class IdTokenServlet(RestServlet):
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.clock = hs.get_clock()
-       self.server_name = hs.config.server_name
+       self.server_name = hs.config.server.server_name

    async def on_POST(
        self, request: SynapseRequest, user_id: str

@@ -59,7 +59,7 @@ class PushRuleRestServlet(RestServlet):
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.notifier = hs.get_notifier()
-       self._is_worker = hs.config.worker_app is not None
+       self._is_worker = hs.config.worker.worker_app is not None

        self._users_new_default_push_rules = hs.config.users_new_default_push_rules

@@ -388,7 +388,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
            limit = None

        handler = self.hs.get_room_list_handler()
-       if server and server != self.hs.config.server_name:
+       if server and server != self.hs.config.server.server_name:
            # Ensure the server is valid.
            try:
                parse_and_validate_server_name(server)

@@ -438,7 +438,7 @@ class PublicRoomListRestServlet(TransactionRestServlet):
            limit = None

        handler = self.hs.get_room_list_handler()
-       if server and server != self.hs.config.server_name:
+       if server and server != self.hs.config.server.server_name:
            # Ensure the server is valid.
            try:
                parse_and_validate_server_name(server)

@@ -86,12 +86,12 @@ class LocalKey(Resource):
        json_object = {
            "valid_until_ts": self.valid_until_ts,
-           "server_name": self.config.server_name,
+           "server_name": self.config.server.server_name,
            "verify_keys": verify_keys,
            "old_verify_keys": old_verify_keys,
        }
        for key in self.config.signing_key:
-           json_object = sign_json(json_object, self.config.server_name, key)
+           json_object = sign_json(json_object, self.config.server.server_name, key)
        return json_object

    def render_GET(self, request):

@@ -224,7 +224,9 @@ class RemoteKey(DirectServeJsonResource):
        for key_json in json_results:
            key_json = json_decoder.decode(key_json.decode("utf-8"))
            for signing_key in self.config.key_server_signing_keys:
-               key_json = sign_json(key_json, self.config.server_name, signing_key)
+               key_json = sign_json(
+                   key_json, self.config.server.server_name, signing_key
+               )

            signed_keys.append(key_json)

@@ -34,10 +34,10 @@ class WellKnownBuilder:
    def get_well_known(self):
        # if we don't have a public_baseurl, we can't help much here.
-       if self._config.public_baseurl is None:
+       if self._config.server.public_baseurl is None:
            return None

-       result = {"m.homeserver": {"base_url": self._config.public_baseurl}}
+       result = {"m.homeserver": {"base_url": self._config.server.public_baseurl}}

        if self._config.default_identity_server:
            result["m.identity_server"] = {

@@ -313,7 +313,7 @@ class HomeServer(metaclass=abc.ABCMeta):
        # Register background tasks required by this server. This must be done
        # somewhat manually due to the background tasks not being registered
        # unless handlers are instantiated.
-       if self.config.run_background_tasks:
+       if self.config.worker.run_background_tasks:
            self.setup_background_tasks()

    def start_listening(self) -> None:

@@ -370,8 +370,8 @@ class HomeServer(metaclass=abc.ABCMeta):
        return Ratelimiter(
            store=self.get_datastore(),
            clock=self.get_clock(),
-           rate_hz=self.config.rc_registration.per_second,
-           burst_count=self.config.rc_registration.burst_count,
+           rate_hz=self.config.ratelimiting.rc_registration.per_second,
+           burst_count=self.config.ratelimiting.rc_registration.burst_count,
        )

    @cache_in_self

@@ -498,7 +498,7 @@ class HomeServer(metaclass=abc.ABCMeta):
    @cache_in_self
    def get_device_handler(self):
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            return DeviceWorkerHandler(self)
        else:
            return DeviceHandler(self)

@@ -621,7 +621,7 @@ class HomeServer(metaclass=abc.ABCMeta):
    def get_federation_sender(self) -> AbstractFederationSender:
        if self.should_send_federation():
            return FederationSender(self)
-       elif not self.config.worker_app:
+       elif not self.config.worker.worker_app:
            return FederationRemoteSendQueue(self)
        else:
            raise Exception("Workers cannot send federation traffic")

@@ -650,14 +650,14 @@ class HomeServer(metaclass=abc.ABCMeta):
    def get_groups_local_handler(
        self,
    ) -> Union[GroupsLocalWorkerHandler, GroupsLocalHandler]:
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            return GroupsLocalWorkerHandler(self)
        else:
            return GroupsLocalHandler(self)

    @cache_in_self
    def get_groups_server_handler(self):
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            return GroupsServerWorkerHandler(self)
        else:
            return GroupsServerHandler(self)

@@ -684,7 +684,7 @@ class HomeServer(metaclass=abc.ABCMeta):
    @cache_in_self
    def get_room_member_handler(self) -> RoomMemberHandler:
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            return RoomMemberWorkerHandler(self)
        return RoomMemberMasterHandler(self)

@@ -694,13 +694,13 @@ class HomeServer(metaclass=abc.ABCMeta):
    @cache_in_self
    def get_server_notices_manager(self) -> ServerNoticesManager:
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            raise Exception("Workers cannot send server notices")
        return ServerNoticesManager(self)

    @cache_in_self
    def get_server_notices_sender(self) -> WorkerServerNoticesSender:
-       if self.config.worker_app:
+       if self.config.worker.worker_app:
            return WorkerServerNoticesSender(self)
        return ServerNoticesSender(self)

@@ -766,7 +766,9 @@ class HomeServer(metaclass=abc.ABCMeta):
    @cache_in_self
    def get_federation_ratelimiter(self) -> FederationRateLimiter:
-       return FederationRateLimiter(self.get_clock(), config=self.config.rc_federation)
+       return FederationRateLimiter(
+           self.get_clock(), config=self.config.ratelimiting.rc_federation
+       )

    @cache_in_self
    def get_module_api(self) -> ModuleApi:

@@ -271,7 +271,7 @@ class DataStore(
        def get_users_paginate_txn(txn):
            filters = []
-           args = [self.hs.config.server_name]
+           args = [self.hs.config.server.server_name]

            # Set ordering
            order_by_column = UserSortOrder(order_by).value

@@ -356,13 +356,13 @@ def check_database_before_upgrade(cur, database_engine, config: HomeServerConfig
        return

    user_domain = get_domain_from_id(rows[0][0])
-   if user_domain == config.server_name:
+   if user_domain == config.server.server_name:
        return

    raise Exception(
        "Found users in database not native to %s!\n"
        "You cannot change a synapse server_name after it's been configured"
-       % (config.server_name,)
+       % (config.server.server_name,)
    )

@@ -35,7 +35,7 @@ class CensorEventsStore(EventsWorkerStore, CacheInvalidationWorkerStore, SQLBase
        super().__init__(database, db_conn, hs)

        if (
-           hs.config.run_background_tasks
+           hs.config.worker.run_background_tasks
            and self.hs.config.redaction_retention_period is not None
        ):
            hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000)

@@ -355,7 +355,7 @@ class ClientIpWorkerStore(ClientIpBackgroundUpdateStore):
        self.user_ips_max_age = hs.config.user_ips_max_age

-       if hs.config.run_background_tasks and self.user_ips_max_age:
+       if hs.config.worker.run_background_tasks and self.user_ips_max_age:
            self._clock.looping_call(self._prune_old_user_ips, 5 * 1000)

    @wrap_as_background_process("prune_old_user_ips")

@@ -51,7 +51,7 @@ class DeviceWorkerStore(SQLBaseStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(
                self._prune_old_outbound_device_pokes, 60 * 60 * 1000
            )

@@ -62,7 +62,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            hs.get_clock().looping_call(
                self._delete_old_forward_extrem_cache, 60 * 60 * 1000
            )

@@ -82,7 +82,7 @@ class EventPushActionsWorkerStore(SQLBaseStore):
        self._rotate_delay = 3
        self._rotate_count = 10000
        self._doing_notif_rotation = False
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._rotate_notif_loop = self._clock.looping_call(
                self._rotate_notifs, 30 * 60 * 1000
            )

@@ -158,7 +158,7 @@ class EventsWorkerStore(SQLBaseStore):
                db_conn, "events", "stream_ordering", step=-1
            )

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            # We periodically clean out old transaction ID mappings
            self._clock.looping_call(
                self._cleanup_old_transaction_ids,

@@ -56,7 +56,7 @@ class ServerMetricsStore(EventPushActionsWorkerStore, SQLBaseStore):
        super().__init__(database, db_conn, hs)

        # Read the extrems every 60 minutes
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000)

        # Used in _generate_user_daily_visits to keep track of progress

@@ -132,14 +132,14 @@ class RegistrationWorkerStore(CacheInvalidationWorkerStore):
            hs.config.account_validity.account_validity_startup_job_max_delta
        )

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.call_later(
                0.0,
                self._set_expiration_date_when_missing,
            )

        # Create a background job for culling expired 3PID validity tokens
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(
                self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS
            )

@@ -815,7 +815,7 @@ class RoomWorkerStore(SQLBaseStore):
                If it is `None` media will be removed from quarantine
        """
        logger.info("Quarantining media: %s/%s", server_name, media_id)
-       is_local = server_name == self.config.server_name
+       is_local = server_name == self.config.server.server_name

        def _quarantine_media_by_id_txn(txn):
            local_mxcs = [media_id] if is_local else []

@@ -81,7 +81,7 @@ class RoomMemberWorkerStore(EventsWorkerStore):
            txn.close()

        if (
-           self.hs.config.run_background_tasks
+           self.hs.config.worker.run_background_tasks
            and self.hs.config.metrics_flags.known_servers
        ):
            self._known_servers_count = 1

@@ -48,7 +48,7 @@ class SessionStore(SQLBaseStore):
        super().__init__(database, db_conn, hs)

        # Create a background job for culling expired sessions.
-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000)

    async def create_session(

@@ -672,7 +672,7 @@ class StatsStore(StateDeltasStore):
        def get_users_media_usage_paginate_txn(txn):
            filters = []
-           args = [self.hs.config.server_name]
+           args = [self.hs.config.server.server_name]

            if search_term:
                filters.append("(lmr.user_id LIKE ? OR displayname LIKE ?)")

@@ -60,7 +60,7 @@ class TransactionWorkerStore(CacheInvalidationWorkerStore):
    def __init__(self, database: DatabasePool, db_conn, hs):
        super().__init__(database, db_conn, hs)

-       if hs.config.run_background_tasks:
+       if hs.config.worker.run_background_tasks:
            self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)

    @wrap_as_background_process("cleanup_transactions")

@@ -510,7 +510,7 @@ class UserDirectoryStore(UserDirectoryBackgroundUpdateStore):
        self._prefer_local_users_in_search = (
            hs.config.user_directory_search_prefer_local_users
        )
-       self._server_name = hs.config.server_name
+       self._server_name = hs.config.server.server_name

    async def remove_from_user_dir(self, user_id: str) -> None:
        def _remove_from_user_dir_txn(txn):

@@ -134,7 +134,7 @@ def prepare_database(
            # if it's a worker app, refuse to upgrade the database, to avoid multiple
            # workers doing it at once.
            if (
-               config.worker_app is not None
+               config.worker.worker_app is not None
                and version_info.current_version != SCHEMA_VERSION
            ):
                raise UpgradeDatabaseException(

@@ -154,7 +154,7 @@ def prepare_database(
            # if it's a worker app, refuse to upgrade the database, to avoid multiple
            # workers doing it at once.
-           if config and config.worker_app is not None:
+           if config and config.worker.worker_app is not None:
                raise UpgradeDatabaseException(EMPTY_DATABASE_ON_WORKER_ERROR)

            _setup_new_database(cur, database_engine, databases=databases)

@@ -355,7 +355,7 @@ def _upgrade_existing_database(
    else:
        assert config

-   is_worker = config and config.worker_app is not None
+   is_worker = config and config.worker.worker_app is not None

    if (
        current_schema_state.compat_version is not None

@@ -38,7 +38,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
        logger.warning("Could not get app_service_config_files from config")
        pass

-   appservices = load_appservices(config.server_name, config_files)
+   appservices = load_appservices(config.server.server_name, config_files)

    owned = {}

@@ -67,7 +67,7 @@ def run_upgrade(cur, database_engine, config, *args, **kwargs):
        INNER JOIN room_memberships AS r USING (event_id)
        WHERE type = 'm.room.member' AND state_key LIKE ?
    """
-   cur.execute(sql, ("%:" + config.server_name,))
+   cur.execute(sql, ("%:" + config.server.server_name,))

    cur.execute(
        "CREATE UNIQUE INDEX local_current_membership_idx ON local_current_membership(user_id, room_id)"

@@ -63,12 +63,12 @@ def build_jinja_env(
    env.filters.update(
        {
            "format_ts": _format_ts_filter,
-           "mxc_to_http": _create_mxc_to_http_filter(config.public_baseurl),
+           "mxc_to_http": _create_mxc_to_http_filter(config.server.public_baseurl),
        }
    )

    # common variables for all templates
-   env.globals.update({"server_name": config.server_name})
+   env.globals.update({"server_name": config.server.server_name})

    return env

@@ -23,10 +23,13 @@ class WellKnownTests(unittest.HomeserverTestCase):
        # replace the JsonResource with a WellKnownResource
        return WellKnownResource(self.hs)

+   @unittest.override_config(
+       {
+           "public_baseurl": "https://tesths",
+           "default_identity_server": "https://testis",
+       }
+   )
    def test_well_known(self):
-       self.hs.config.public_baseurl = "https://tesths"
-       self.hs.config.default_identity_server = "https://testis"
        channel = self.make_request(
            "GET", "/.well-known/matrix/client", shorthand=False
        )

@@ -35,14 +38,17 @@ class WellKnownTests(unittest.HomeserverTestCase):
        self.assertEqual(
            channel.json_body,
            {
-               "m.homeserver": {"base_url": "https://tesths"},
+               "m.homeserver": {"base_url": "https://tesths/"},
                "m.identity_server": {"base_url": "https://testis"},
            },
        )

+   @unittest.override_config(
+       {
+           "public_baseurl": None,
+       }
+   )
    def test_well_known_no_public_baseurl(self):
-       self.hs.config.public_baseurl = None
        channel = self.make_request(
            "GET", "/.well-known/matrix/client", shorthand=False
        )
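
A note on the test changes above: because the flags now live on sub-config objects, the tests stop assigning attributes on hs.config directly and instead supply the values through the @unittest.override_config decorator (part of Synapse's test harness, as shown in the hunk), so the values go through normal config parsing. That would also explain why the expected base_url gains a trailing slash ("https://tesths/"): the parsed public_baseurl is presumably normalised to end with "/", which the previous raw attribute assignment bypassed.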