Update ruff config (#16283)

Enable additional checks & clean up unneeded configuration.
Patrick Cloke 2023-09-08 11:24:36 -04:00 committed by GitHub
parent c1c6c95d72
commit aa483cb4c9
26 changed files with 63 additions and 64 deletions

changelog.d/16283.misc (new file, +1 line)

@@ -0,0 +1 @@
Enable additional linting checks.


@@ -37,7 +37,6 @@ class HttpClient:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
"""
pass
def get_json(self, url, args=None):
"""Gets some json from the given host homeserver and path
@@ -53,7 +52,6 @@ class HttpClient:
Deferred: Succeeds when we get a 2xx HTTP response. The result
will be the decoded JSON body.
"""
pass
class TwistedHttpClient(HttpClient):
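These `pass` removals (and the matching ones in the abstract methods further down) line up with the newly enabled flake8-pie family, which reports a `pass` as unnecessary when the docstring already forms a valid body. A minimal sketch of the pattern, reusing the method names from this file:

```python
class HttpClient:
    def put_json(self, url, jsonbody):
        """Sends the specified JSON and expects a 2xx response."""
        pass  # flagged: the docstring above is already a valid body

    def get_json(self, url, args=None):
        """Fetches JSON from the given path."""
        # after the clean-up the docstring alone is the body; no `pass` needed
```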


@@ -239,7 +239,7 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None:
log("Could not find %s, will not use" % (jemallocpath,))
# if there are no config files passed to synapse, try adding the default file
if not any(p.startswith("--config-path") or p.startswith("-c") for p in args):
if not any(p.startswith(("--config-path", "-c")) for p in args):
config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data")
config_path = environ.get(
"SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml"


@@ -43,33 +43,39 @@ target-version = ['py38', 'py39', 'py310', 'py311']
[tool.ruff]
line-length = 88
# See https://github.com/charliermarsh/ruff/#pycodestyle
# See https://beta.ruff.rs/docs/rules/#error-e
# for error codes. The ones we ignore are:
# E731: do not assign a lambda expression, use a def
# E501: Line too long (black enforces this for us)
# E731: do not assign a lambda expression, use a def
#
# flake8-bugbear compatible checks. Its error codes are described at
# https://github.com/charliermarsh/ruff/#flake8-bugbear
# B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks
# https://beta.ruff.rs/docs/rules/#flake8-bugbear-b
# B023: Functions defined inside a loop must not use variables redefined in the loop
# B024: Abstract base class with no abstract method.
ignore = [
"B019",
"B023",
"B024",
"E501",
"E731",
]
select = [
# pycodestyle checks.
# pycodestyle
"E",
"W",
# pyflakes checks.
# pyflakes
"F",
# flake8-bugbear checks.
# flake8-bugbear
"B0",
# flake8-comprehensions checks.
# flake8-comprehensions
"C4",
# flake8-2020
"YTT",
# flake8-slots
"SLOT",
# flake8-debugger
"T10",
# flake8-pie
"PIE",
# flake8-executable
"EXE",
]
[tool.isort]
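For orientation, here is an illustrative Python sketch (not part of the commit) of the sort of patterns the newly selected families report, paraphrased from the ruff documentation:

```python
import sys

# flake8-comprehensions (C4): list(x * x for x in ...) would be reported as an
# unnecessary generator; the comprehension below is the suggested form.
squares = [x * x for x in range(5)]

# flake8-2020 (YTT): string-based checks such as sys.version[0] >= "3" are
# reported; tuple comparisons against sys.version_info are the safe form.
modern_python = sys.version_info >= (3, 8)

# flake8-debugger (T10): a leftover `import pdb; pdb.set_trace()` would be
# reported (kept as a comment here so the sketch stays runnable).

# flake8-pie (PIE) and flake8-executable (EXE) drive the clean-ups in the rest
# of this commit: redundant `pass` statements, range(0, n), chained
# str.startswith() calls, and shebang lines on files that are not executable.
```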


@@ -30,9 +30,10 @@ class SynapsePlugin(Plugin):
self, fullname: str
) -> Optional[Callable[[MethodSigContext], CallableType]]:
if fullname.startswith(
"synapse.util.caches.descriptors.CachedFunction.__call__"
) or fullname.startswith(
"synapse.util.caches.descriptors._LruCachedFunction.__call__"
(
"synapse.util.caches.descriptors.CachedFunction.__call__",
"synapse.util.caches.descriptors._LruCachedFunction.__call__",
)
):
return cached_function_method_signature
return None


@@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
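Dropping the shebang here is presumably tied to the newly enabled flake8-executable (EXE) checks, which report a mismatch between a `#!` line and the file's executable bit. A rough sketch of that invariant, not ruff's actual implementation:

```python
import os
import stat

def shebang_matches_mode(path: str) -> bool:
    """Return True if the file has a shebang exactly when it is executable."""
    with open(path, "rb") as f:
        has_shebang = f.read(2) == b"#!"
    is_executable = bool(os.stat(path).st_mode & stat.S_IXUSR)
    return has_shebang == is_executable
```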


@@ -55,7 +55,6 @@ class UnpersistedEventContextBase(ABC):
A method to convert an UnpersistedEventContext to an EventContext, suitable for
sending to the database with the associated event.
"""
pass
@abstractmethod
async def get_prev_state_ids(
@@ -69,7 +68,6 @@ class UnpersistedEventContextBase(ABC):
state_filter: specifies the type of state event to fetch from DB, example:
EventTypes.JoinRules
"""
pass
@attr.s(slots=True, auto_attribs=True)


@@ -846,9 +846,7 @@ def _is_media(content_type: str) -> bool:
def _is_html(content_type: str) -> bool:
content_type = content_type.lower()
return content_type.startswith("text/html") or content_type.startswith(
"application/xhtml"
)
return content_type.startswith(("text/html", "application/xhtml"))
def _is_json(content_type: str) -> bool:


@@ -62,7 +62,6 @@ class Constraint(metaclass=abc.ABCMeta):
@abc.abstractmethod
def make_check_clause(self, table: str) -> str:
"""Returns an SQL expression that checks the row passes the constraint."""
pass
@abc.abstractmethod
def make_constraint_clause_postgres(self) -> str:
@@ -70,7 +69,6 @@
Only used on Postgres DBs
"""
pass
@attr.s(auto_attribs=True)


@@ -112,7 +112,7 @@ async def main(reactor, loops):
start = perf_counter()
# Send a bunch of useful messages
for i in range(0, loops):
for i in range(loops):
logger.info("test message %s", i)
if len(handler._buffer) == handler.maximum_buffer:
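Most of the remaining hunks are this same mechanical change: `range(0, n)` becomes `range(n)`. The two are interchangeable because `range` starts at 0 by default (again a flake8-pie style check). A quick sanity check:

```python
# range() starts at 0 by default, so the explicit start argument adds nothing
assert list(range(0, 5)) == list(range(5)) == [0, 1, 2, 3, 4]

for i in range(3):  # equivalent to range(0, 3)
    print("test message %s" % (i,))
```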


@@ -223,7 +223,7 @@ class DeviceTestCase(unittest.HomeserverTestCase):
# queue a bunch of messages in the inbox
requester = create_requester(sender, device_id=DEVICE_ID)
for i in range(0, DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
for i in range(DeviceHandler.DEVICE_MSGS_DELETE_BATCH_LIMIT + 10):
self.get_success(
self.device_message_handler.send_device_message(
requester, "message_type", {receiver: {"*": {"val": i}}}


@@ -262,7 +262,7 @@ class FederationTestCase(unittest.FederatingHomeserverTestCase):
if (ev.type, ev.state_key)
in {("m.room.create", ""), ("m.room.member", remote_server_user_id)}
]
for _ in range(0, 8):
for _ in range(8):
event = make_event_from_dict(
self.add_hashes_and_signatures_from_other_server(
{


@@ -78,11 +78,11 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):
logger = self.get_logger(handler)
# Send some debug messages
for i in range(0, 3):
for i in range(3):
logger.debug("debug %s" % (i,))
# Send a bunch of useful messages
for i in range(0, 7):
for i in range(7):
logger.info("info %s" % (i,))
# The last debug message pushes it past the maximum buffer
@@ -108,15 +108,15 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):
logger = self.get_logger(handler)
# Send some debug messages
for i in range(0, 3):
for i in range(3):
logger.debug("debug %s" % (i,))
# Send a bunch of useful messages
for i in range(0, 10):
for i in range(10):
logger.warning("warn %s" % (i,))
# Send a bunch of info messages
for i in range(0, 3):
for i in range(3):
logger.info("info %s" % (i,))
# The last debug message pushes it past the maximum buffer
@@ -144,7 +144,7 @@ class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase):
logger = self.get_logger(handler)
# Send a bunch of useful messages
for i in range(0, 20):
for i in range(20):
logger.warning("warn %s" % (i,))
# Allow the reconnection


@@ -49,7 +49,7 @@ class ToDeviceStreamTestCase(BaseStreamTestCase):
# add messages to the device inbox for user1 up until the
# limit defined for a stream update batch
for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT):
for i in range(_STREAM_UPDATE_TARGET_ROW_COUNT):
msg["content"] = {"device": {}}
messages = {user1: {"device": msg}}


@@ -510,7 +510,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
Args:
number_destinations: Number of destinations to be created
"""
for i in range(0, number_destinations):
for i in range(number_destinations):
dest = f"sub{i}.example.com"
self._create_destination(dest, 50, 50, 50, 100)
@@ -690,7 +690,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
self._check_fields(channel_desc.json_body["rooms"])
# test that both lists have different directions
for i in range(0, number_rooms):
for i in range(number_rooms):
self.assertEqual(
channel_asc.json_body["rooms"][i]["room_id"],
channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"],
@@ -777,7 +777,7 @@ class DestinationMembershipTestCase(unittest.HomeserverTestCase):
Args:
number_rooms: Number of rooms to be created
"""
for _ in range(0, number_rooms):
for _ in range(number_rooms):
room_id = self.helper.create_room_as(
self.admin_user, tok=self.admin_user_tok
)


@@ -575,7 +575,7 @@ class DeactivateTestCase(unittest.HomeserverTestCase):
# create a bunch of users and add keys for them
users = []
for i in range(0, 20):
for i in range(20):
user_id = self.register_user("missPiggy" + str(i), "test")
users.append((user_id,))


@@ -176,10 +176,10 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
def test_POST_ratelimiting_per_address(self) -> None:
# Create different users so we're sure not to be bothered by the per-user
# ratelimiter.
for i in range(0, 6):
for i in range(6):
self.register_user("kermit" + str(i), "monkey")
for i in range(0, 6):
for i in range(6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit" + str(i)},
@@ -228,7 +228,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
def test_POST_ratelimiting_per_account(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
for i in range(6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},
@@ -277,7 +277,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
def test_POST_ratelimiting_per_account_failed_attempts(self) -> None:
self.register_user("kermit", "monkey")
for i in range(0, 6):
for i in range(6):
params = {
"type": "m.login.password",
"identifier": {"type": "m.id.user", "user": "kermit"},


@@ -169,7 +169,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
@override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
def test_POST_ratelimiting_guest(self) -> None:
for i in range(0, 6):
for i in range(6):
url = self.url + b"?kind=guest"
channel = self.make_request(b"POST", url, b"{}")
@@ -187,7 +187,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase):
@override_config({"rc_registration": {"per_second": 0.17, "burst_count": 5}})
def test_POST_ratelimiting(self) -> None:
for i in range(0, 6):
for i in range(6):
request_data = {
"username": "kermit" + str(i),
"password": "monkey",
@@ -1223,7 +1223,7 @@ class RegistrationTokenValidityRestServletTestCase(unittest.HomeserverTestCase):
def test_GET_ratelimiting(self) -> None:
token = "1234"
for i in range(0, 6):
for i in range(6):
channel = self.make_request(
b"GET",
f"{self.url}?token={token}",


@@ -382,7 +382,7 @@ class ReadWriteLockTestCase(unittest.HomeserverTestCase):
self.get_success(lock.__aenter__())
# Wait for ages with the lock, we should not be able to get the lock.
for _ in range(0, 10):
for _ in range(10):
self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000))
lock2 = self.get_success(


@@ -664,7 +664,7 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
# Add a bunch of state so that it takes multiple iterations of the
# background update to process the room.
for i in range(0, 150):
for i in range(150):
self.helper.send_state(
room_id, event_type="m.test", body={"index": i}, tok=self.token
)
@@ -718,12 +718,12 @@ class EventChainBackgroundUpdateTestCase(HomeserverTestCase):
# Add a bunch of state so that it takes multiple iterations of the
# background update to process the room.
for i in range(0, 150):
for i in range(150):
self.helper.send_state(
room_id1, event_type="m.test", body={"index": i}, tok=self.token
)
for i in range(0, 150):
for i in range(150):
self.helper.send_state(
room_id2, event_type="m.test", body={"index": i}, tok=self.token
)


@@ -227,7 +227,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
(room_id, event_id),
)
for i in range(0, 20):
for i in range(20):
self.get_success(
self.store.db_pool.runInteraction("insert", insert_event, i)
)
@@ -235,7 +235,7 @@
# this should get the last ten
r = self.get_success(self.store.get_prev_events_for_room(room_id))
self.assertEqual(10, len(r))
for i in range(0, 10):
for i in range(10):
self.assertEqual("$event_%i:local" % (19 - i), r[i])
def test_get_rooms_with_many_extremities(self) -> None:
@@ -277,7 +277,7 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
(room_id, event_id),
)
for i in range(0, 20):
for i in range(20):
self.get_success(
self.store.db_pool.runInteraction("insert", insert_event, i, room1)
)


@@ -82,7 +82,7 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.db_pool.runInteraction("", f))
for i in range(0, 70):
for i in range(70):
self.get_success(
self.store.db_pool.simple_insert(
"profiles",
@@ -115,7 +115,7 @@
)
expected_values = []
for i in range(0, 70):
for i in range(70):
expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
res = self.get_success(


@@ -38,5 +38,5 @@ class SQLTransactionLimitTestCase(unittest.HomeserverTestCase):
db_pool = self.hs.get_datastores().databases[0]
# force txn limit to roll over at least once
for _ in range(0, 1001):
for _ in range(1001):
self.get_success_or_raise(db_pool.runInteraction("test_select", do_select))


@@ -45,7 +45,7 @@ class UserFiltersStoreTestCase(unittest.HomeserverTestCase):
self.get_success(self.store.db_pool.runInteraction("", f))
for i in range(0, 70):
for i in range(70):
self.get_success(
self.store.db_pool.simple_insert(
"user_filters",
@@ -82,7 +82,7 @@
)
expected_values = []
for i in range(0, 70):
for i in range(70):
expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))
res = self.get_success(


@@ -51,12 +51,12 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
# before we do that, we persist some other events to act as state.
self._inject_visibility("@admin:hs", "joined")
for i in range(0, 10):
for i in range(10):
self._inject_room_member("@resident%i:hs" % i)
events_to_filter = []
for i in range(0, 10):
for i in range(10):
user = "@user%i:%s" % (i, "test_server" if i == 5 else "other_server")
evt = self._inject_room_member(user, extra_content={"a": "b"})
events_to_filter.append(evt)
@@ -74,7 +74,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
)
# the result should be 5 redacted events, and 5 unredacted events.
for i in range(0, 5):
for i in range(5):
self.assertEqual(events_to_filter[i].event_id, filtered[i].event_id)
self.assertNotIn("a", filtered[i].content)
@@ -177,7 +177,7 @@ class FilterEventsForServerTestCase(unittest.HomeserverTestCase):
)
)
for i in range(0, len(events_to_filter)):
for i in range(len(events_to_filter)):
self.assertEqual(
events_to_filter[i].event_id,
filtered[i].event_id,


@@ -623,14 +623,14 @@ class CacheDecoratorTestCase(unittest.HomeserverTestCase):
a = A()
for k in range(0, 12):
for k in range(12):
yield a.func(k)
self.assertEqual(callcount[0], 12)
# There must have been at least 2 evictions, meaning if we calculate
# all 12 values again, we must get called at least 2 more times
for k in range(0, 12):
for k in range(12):
yield a.func(k)
self.assertTrue(