Fix some spelling mistakes / typos. (#7811)

Patrick Cloke 2020-07-09 09:52:58 -04:00 committed by GitHub
parent 53ee214f2f
commit 38e1fac886
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
31 changed files with 41 additions and 40 deletions

changelog.d/7811.misc

@@ -0,0 +1 @@
+Fix various spelling errors in comments and log lines.


@@ -537,7 +537,7 @@ class Auth(object):
# Currently we ignore the `for_verification` flag even though there are
# some situations where we can drop particular auth events when adding
# to the event's `auth_events` (e.g. joins pointing to previous joins
-# when room is publically joinable). Dropping event IDs has the
+# when room is publicly joinable). Dropping event IDs has the
# advantage that the auth chain for the room grows slower, but we use
# the auth chain in state resolution v2 to order events, which means
# care must be taken if dropping events to ensure that it doesn't


@@ -72,7 +72,7 @@ class EmailConfig(Config):
template_dir = email_config.get("template_dir")
# we need an absolute path, because we change directory after starting (and
-# we don't yet know what auxilliary templates like mail.css we will need).
+# we don't yet know what auxiliary templates like mail.css we will need).
# (Note that loading as package_resources with jinja.PackageLoader doesn't
# work for the same reason.)
if not template_dir:
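Since the daemon changes its working directory after starting, a relative template_dir would dangle; a minimal sketch of resolving it once at config-load time (the fallback location here is an assumption for illustration, not Synapse's actual default):

```python
import os


def resolve_template_dir(configured_dir=None):
    """Return an absolute path to the email templates.

    Resolved at config-load time, before the process changes its working
    directory, so relative paths from the config file still work later.
    """
    if not configured_dir:
        # Assumed fallback for this sketch: templates shipped next to
        # this module.
        configured_dir = os.path.join(os.path.dirname(__file__), "templates")
    return os.path.abspath(configured_dir)
```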


@@ -245,7 +245,7 @@ class FederationClient(FederationBase):
event_id: event to fetch
room_version: version of the room
outlier: Indicates whether the PDU is an `outlier`, i.e. if
-it's from an arbitary point in the context as opposed to part
+it's from an arbitrary point in the context as opposed to part
of the current block of PDUs. Defaults to `False`
timeout: How long to try (in ms) each destination for before
moving to the next destination. None indicates no timeout.
@@ -351,7 +351,7 @@ class FederationClient(FederationBase):
outlier: bool = False,
include_none: bool = False,
) -> List[EventBase]:
"""Takes a list of PDUs and checks the signatures and hashs of each
"""Takes a list of PDUs and checks the signatures and hashes of each
one. If a PDU fails its signature check then we check if we have it in
the database and if not then request if from the originating server of
that PDU.
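The docstring above describes a three-step recovery flow. A hedged sketch of that flow, with `verify`, `get_local`, and `fetch_from_origin` as assumed helper coroutines rather than Synapse's actual API:

```python
async def check_pdus(pdus, verify, get_local, fetch_from_origin):
    """Keep PDUs that pass signature/hash checks, repairing the rest."""
    checked = []
    for pdu in pdus:
        if await verify(pdu):
            checked.append(pdu)
            continue
        # Signature check failed: prefer a copy we already trust from the
        # database, otherwise re-request the event from its origin server.
        local = await get_local(pdu.event_id)
        checked.append(local if local is not None else await fetch_from_origin(pdu))
    return checked
```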


@@ -717,7 +717,7 @@ def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
# server name is a literal IP
allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
if not isinstance(allow_ip_literals, bool):
logger.warning("Ignorning non-bool allow_ip_literals flag")
logger.warning("Ignoring non-bool allow_ip_literals flag")
allow_ip_literals = True
if not allow_ip_literals:
# check for ipv6 literals. These start with '['.
@@ -731,7 +731,7 @@ def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
# next, check the deny list
deny = acl_event.content.get("deny", [])
if not isinstance(deny, (list, tuple)):
logger.warning("Ignorning non-list deny ACL %s", deny)
logger.warning("Ignoring non-list deny ACL %s", deny)
deny = []
for e in deny:
if _acl_entry_matches(server_name, e):
@@ -741,7 +741,7 @@ def server_matches_acl_event(server_name: str, acl_event: EventBase) -> bool:
# then the allow list.
allow = acl_event.content.get("allow", [])
if not isinstance(allow, (list, tuple)):
logger.warning("Ignorning non-list allow ACL %s", allow)
logger.warning("Ignoring non-list allow ACL %s", allow)
allow = []
for e in allow:
if _acl_entry_matches(server_name, e):
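Putting the three hunks above together: the check tolerates malformed fields (warning and falling back to safe defaults), rejects IP literals when disallowed, applies the deny list, then the allow list. A minimal self-contained sketch, using fnmatch-style globbing as a stand-in for Synapse's ACL entry matching:

```python
from fnmatch import fnmatch


def server_matches_acl(server_name, acl_content):
    """Evaluate an m.room.server_acl-style content dict for server_name."""
    allow_ip_literals = acl_content.get("allow_ip_literals", True)
    if not isinstance(allow_ip_literals, bool):
        allow_ip_literals = True  # ignore non-bool values, as above
    if not allow_ip_literals and (
        server_name.startswith("[")                # IPv6 literals start with '['
        or server_name.replace(".", "").isdigit()  # crude IPv4 check, sketch only
    ):
        return False

    deny = acl_content.get("deny", [])
    if not isinstance(deny, (list, tuple)):
        deny = []  # ignore non-list values, as above
    if any(fnmatch(server_name, e) for e in deny if isinstance(e, str)):
        return False  # the deny list wins

    allow = acl_content.get("allow", [])
    if not isinstance(allow, (list, tuple)):
        allow = []
    return any(fnmatch(server_name, e) for e in allow if isinstance(e, str))


acl = {"deny": ["*.evil.example"], "allow": ["*"]}
assert not server_matches_acl("spam.evil.example", acl)
assert server_matches_acl("matrix.org", acl)
```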


@@ -359,7 +359,7 @@ class BaseFederationRow(object):
Specifies how to identify, serialize and deserialize the different types.
"""
TypeId = "" # Unique string that ids the type. Must be overriden in sub classes.
TypeId = "" # Unique string that ids the type. Must be overridden in sub classes.
@staticmethod
def from_data(data):
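The overriding contract in this docstring (and in the identical events-stream hunk further down) supports a simple registry keyed by the id string. A sketch with an invented row type, not the actual federation stream classes:

```python
class BaseRow:
    TypeId = ""  # unique string that ids the type; must be overridden

    @staticmethod
    def from_data(data):
        raise NotImplementedError


class PresenceRow(BaseRow):
    TypeId = "p"

    def __init__(self, user_id):
        self.user_id = user_id

    @staticmethod
    def from_data(data):
        return PresenceRow(user_id=data["user_id"])


# Deserialization can then dispatch on the TypeId string:
TYPE_TO_ROW = {cls.TypeId: cls for cls in (PresenceRow,)}
row = TYPE_TO_ROW["p"].from_data({"user_id": "@alice:example.org"})
assert row.user_id == "@alice:example.org"
```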


@@ -119,7 +119,7 @@ class PerDestinationQueue(object):
)
def send_pdu(self, pdu: EventBase, order: int) -> None:
"""Add a PDU to the queue, and start the transmission loop if neccessary
"""Add a PDU to the queue, and start the transmission loop if necessary
Args:
pdu: pdu to send
@@ -129,7 +129,7 @@
self.attempt_new_transaction()
def send_presence(self, states: Iterable[UserPresenceState]) -> None:
"""Add presence updates to the queue. Start the transmission loop if neccessary.
"""Add presence updates to the queue. Start the transmission loop if necessary.
Args:
states: presence to send
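Both docstrings describe the same queue-then-kick pattern: enqueue, then start the transmission loop only if one isn't already running. A toy synchronous sketch of that guard (the real queue is asynchronous and per-destination):

```python
class ToyDestinationQueue:
    def __init__(self, transmit):
        self._pending = []
        self._transmitting = False
        self._transmit = transmit  # callable that sends one item

    def send(self, item):
        """Add an item to the queue, starting the loop if necessary."""
        self._pending.append(item)
        self._attempt_new_transaction()

    def _attempt_new_transaction(self):
        if self._transmitting:
            return  # a transmission loop is already draining the queue
        self._transmitting = True
        try:
            while self._pending:
                self._transmit(self._pending.pop(0))
        finally:
            self._transmitting = False
```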


@@ -746,7 +746,7 @@ class TransportLayerClient(object):
def remove_user_from_group(
self, destination, group_id, requester_user_id, user_id, content
):
"""Remove a user fron a group
"""Remove a user from a group
"""
path = _create_v1_path("/groups/%s/users/%s/remove", group_id, user_id)


@@ -109,7 +109,7 @@ class Authenticator(object):
self.server_name = hs.hostname
self.store = hs.get_datastore()
self.federation_domain_whitelist = hs.config.federation_domain_whitelist
-self.notifer = hs.get_notifier()
+self.notifier = hs.get_notifier()
self.replication_client = None
if hs.config.worker.worker_app:
@@ -175,7 +175,7 @@ class Authenticator(object):
await self.store.set_destination_retry_timings(origin, None, 0, 0)
# Inform the relevant places that the remote server is back up.
-self.notifer.notify_remote_server_up(origin)
+self.notifier.notify_remote_server_up(origin)
if self.replication_client:
# If we're on a worker we try and inform master about this. The
# replication client doesn't hook into the notifier to avoid


@@ -83,7 +83,7 @@ class _NotifierUserStream(object):
self.current_token = current_token
# The last token for which we should wake up any streams that have a
-# token that comes before it. This gets updated everytime we get poked.
+# token that comes before it. This gets updated every time we get poked.
# We start it at the current token since if we get any streams
# that have a token from before we have no idea whether they should be
# woken up or not, so lets just wake them up.


@@ -92,11 +92,11 @@ class ReplicationEndpoint(object):
# assert here that sub classes don't try and use the name.
assert (
"instance_name" not in self.PATH_ARGS
), "`instance_name` is a reserved paramater name"
), "`instance_name` is a reserved parameter name"
assert (
"instance_name"
not in signature(self.__class__._serialize_payload).parameters
), "`instance_name` is a reserved paramater name"
), "`instance_name` is a reserved parameter name"
assert self.METHOD in ("PUT", "POST", "GET")
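The assertions above guard a name the framework injects itself. A minimal sketch of the same check using inspect.signature (the payload function here is invented for illustration):

```python
from inspect import signature


def assert_no_reserved_param(func, reserved="instance_name"):
    """Refuse functions that declare a parameter the framework reserves."""
    assert reserved not in signature(func).parameters, (
        "`%s` is a reserved parameter name" % (reserved,)
    )


def _serialize_payload(event_id, room_id):  # fine: no reserved names used
    return {"event_id": event_id, "room_id": room_id}


assert_no_reserved_param(_serialize_payload)
```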


@@ -25,7 +25,7 @@ Structure of the module:
* command.py - the definitions of all the valid commands
* protocol.py - the TCP protocol classes
* resource.py - handles streaming stream updates to replications
-* streams/ - the definitons of all the valid streams
+* streams/ - the definitions of all the valid streams
The general interaction of the classes are:


@@ -47,7 +47,7 @@ class Command(metaclass=abc.ABCMeta):
@abc.abstractmethod
def to_line(self) -> str:
"""Serialises the comamnd for the wire. Does not include the command
"""Serialises the command for the wire. Does not include the command
prefix.
"""


@@ -317,7 +317,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
def _queue_command(self, cmd):
"""Queue the command until the connection is ready to write to again.
"""
logger.debug("[%s] Queing as conn %r, cmd: %r", self.id(), self.state, cmd)
logger.debug("[%s] Queueing as conn %r, cmd: %r", self.id(), self.state, cmd)
self.pending_commands.append(cmd)
if len(self.pending_commands) > self.max_line_buffer:


@@ -177,7 +177,7 @@ class RedisDirectTcpReplicationClientFactory(txredisapi.SubscriberFactory):
Args:
hs
outbound_redis_connection: A connection to redis that will be used to
-send outbound commands (this is seperate to the redis connection
+send outbound commands (this is separate to the redis connection
used to subscribe).
"""


@@ -62,7 +62,7 @@ class BaseEventsStreamRow(object):
Specifies how to identify, serialize and deserialize the different types.
"""
-# Unique string that ids the type. Must be overriden in sub classes.
+# Unique string that ids the type. Must be overridden in sub classes.
TypeId = None # type: str
@classmethod


@@ -68,13 +68,13 @@ class PaginationConfig(object):
elif from_tok:
from_tok = StreamToken.from_string(from_tok)
except Exception:
raise SynapseError(400, "'from' paramater is invalid")
raise SynapseError(400, "'from' parameter is invalid")
try:
if to_tok:
to_tok = StreamToken.from_string(to_tok)
except Exception:
raise SynapseError(400, "'to' paramater is invalid")
raise SynapseError(400, "'to' parameter is invalid")
limit = parse_integer(request, "limit", default=default_limit)
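Each token above is parsed in its own try/except so the 400 can name the offending parameter. A self-contained sketch of that pattern, with simplified stand-ins for StreamToken and SynapseError rather than Synapse's real types:

```python
class SynapseError(Exception):
    def __init__(self, code, msg):
        super().__init__(msg)
        self.code = code


class StreamToken:
    @classmethod
    def from_string(cls, raw):
        if not raw.startswith("s"):  # toy validity rule for this sketch
            raise ValueError("malformed stream token")
        return cls()


def parse_token(args, name):
    raw = args.get(name)
    if raw is None:
        return None
    try:
        return StreamToken.from_string(raw)
    except Exception:
        # Name the bad parameter, as the handler above does.
        raise SynapseError(400, "'%s' parameter is invalid" % (name,))


from_tok = parse_token({"from": "s123"}, "from")  # parses fine
try:
    parse_token({"to": "bogus"}, "to")
except SynapseError as e:
    assert e.code == 400
```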


@@ -68,7 +68,7 @@ class EventSources(object):
The returned token does not have the current values for fields other
than `room`, since they are not used during pagination.
-Retuns:
+Returns:
Deferred[StreamToken]
"""
token = StreamToken(


@@ -55,7 +55,7 @@ class Clock(object):
return self._reactor.seconds()
def time_msec(self):
"""Returns the current system time in miliseconds since epoch."""
"""Returns the current system time in milliseconds since epoch."""
return int(self.time() * 1000)
def looping_call(self, f, msec, *args, **kwargs):


@@ -352,7 +352,7 @@ class ReadWriteLock(object):
# resolved when they release the lock).
#
# Read: We know its safe to acquire a read lock when the latest writer has
-# been resolved. The new reader is appeneded to the list of latest readers.
+# been resolved. The new reader is appended to the list of latest readers.
#
# Write: We know its safe to acquire the write lock when both the latest
# writers and readers have been resolved. The new writer replaces the latest
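A hedged asyncio sketch of the rule those comments describe (Synapse's real class is built on Twisted Deferreds, not asyncio): acquiring returns a future which the holder resolves to release the lock.

```python
import asyncio


class SketchReadWriteLock:
    def __init__(self):
        self._latest_writer = None  # future resolved when that writer releases
        self._latest_readers = []   # futures resolved when those readers release

    async def acquire_read(self):
        prev_writer = self._latest_writer
        release = asyncio.get_running_loop().create_future()
        self._latest_readers.append(release)  # new reader joins latest readers
        if prev_writer is not None:
            await prev_writer  # safe once the latest writer has resolved
        return release  # resolve this future to release the lock

    async def acquire_write(self):
        to_wait = list(self._latest_readers)
        if self._latest_writer is not None:
            to_wait.append(self._latest_writer)
        release = asyncio.get_running_loop().create_future()
        # The new writer replaces the latest writer and clears the readers.
        self._latest_writer = release
        self._latest_readers = []
        for fut in to_wait:
            await fut  # safe once latest writer and readers have resolved
        return release
```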


@@ -516,7 +516,7 @@ class CacheListDescriptor(_CacheDescriptorBase):
"""
Args:
orig (function)
-cached_method_name (str): The name of the chached method.
+cached_method_name (str): The name of the cached method.
list_name (str): Name of the argument which is the bulk lookup list
num_args (int): number of positional arguments (excluding ``self``,
but including list_name) to use as cache keys. Defaults to all


@@ -39,7 +39,7 @@ class Distributor(object):
Signals are named simply by strings.
TODO(paul): It would be nice to give signals stronger object identities,
-so we can attach metadata, docstrings, detect typoes, etc... But this
+so we can attach metadata, docstrings, detect typos, etc... But this
model will do for today.
"""


@@ -192,7 +192,7 @@ def _check_yield_points(f: Callable, changes: List[str]):
result = yield d
except Exception:
# this will fish an earlier Failure out of the stack where possible, and
-# thus is preferable to passing in an exeception to the Failure
+# thus is preferable to passing in an exception to the Failure
# constructor, since it results in less stack-mangling.
result = Failure()


@@ -22,7 +22,7 @@ from synapse.api.errors import CodeMessageException
logger = logging.getLogger(__name__)
-# the intial backoff, after the first transaction fails
+# the initial backoff, after the first transaction fails
MIN_RETRY_INTERVAL = 10 * 60 * 1000
# how much we multiply the backoff by after each subsequent fail
@@ -174,7 +174,7 @@ class RetryDestinationLimiter(object):
# has been decommissioned.
# If we get a 401, then we should probably back off since they
# won't accept our requests for at least a while.
-# 429 is us being aggresively rate limited, so lets rate limit
+# 429 is us being aggressively rate limited, so lets rate limit
# ourselves.
if exc_val.code == 404 and self.backoff_on_404:
valid_err_code = False
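Concretely, the schedule starts at MIN_RETRY_INTERVAL and multiplies after each failure, as the comments above describe. The multiplier and cap below are illustrative assumptions, not necessarily Synapse's exact values:

```python
MIN_RETRY_INTERVAL = 10 * 60 * 1000        # 10 minutes, in ms
RETRY_MULTIPLIER = 5                       # assumed for this sketch
MAX_RETRY_INTERVAL = 24 * 60 * 60 * 1000   # assumed cap of one day, in ms


def next_retry_interval(current_ms):
    """Interval to wait after one more failed transaction."""
    if current_ms == 0:
        return MIN_RETRY_INTERVAL
    return min(current_ms * RETRY_MULTIPLIER, MAX_RETRY_INTERVAL)


# First few intervals: 10 min, 50 min, 250 min, ...
interval = 0
for _ in range(3):
    interval = next_retry_interval(interval)
    print(interval // 60000, "minutes")
```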


@@ -319,7 +319,7 @@ def filter_events_for_server(
return True
# Lets check to see if all the events have a history visibility
# of "shared" or "world_readable". If thats the case then we don't
# of "shared" or "world_readable". If that's the case then we don't
# need to check membership (as we know the server is in the room).
event_to_state_ids = yield storage.state.get_state_ids_for_events(
frozenset(e.event_id for e in events),
@@ -335,7 +335,7 @@ def filter_events_for_server(
visibility_ids.add(hist)
# If we failed to find any history visibility events then the default
# is "shared" visiblity.
# is "shared" visibility.
if not visibility_ids:
all_open = True
else:
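Per the comments above, the membership check can be skipped only when every event's history visibility is "shared" or "world_readable", with an absent setting defaulting to "shared". A minimal sketch of that predicate over simplified inputs:

```python
def can_skip_membership_check(visibility_settings):
    """True if history visibility alone makes the events safe to send."""
    if not visibility_settings:
        return True  # no visibility events found: default is "shared"
    return all(
        v in ("shared", "world_readable") for v in visibility_settings
    )


assert can_skip_membership_check([])
assert can_skip_membership_check(["shared", "world_readable"])
assert not can_skip_membership_check(["shared", "invited"])
```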


@@ -192,7 +192,7 @@ class KeyringTestCase(unittest.HomeserverTestCase):
d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned")
self.failureResultOf(d, SynapseError)
-# should suceed on a signed object
+# should succeed on a signed object
d = _verify_json_for_server(kr, "server9", json1, 500, "test signed")
# self.assertFalse(d.called)
self.get_success(d)


@@ -126,7 +126,7 @@ class RetentionTestCase(unittest.HomeserverTestCase):
events.append(self.get_success(store.get_event(valid_event_id)))
-# Advance the time by anothe 2 days. After this, the first event should be
+# Advance the time by another 2 days. After this, the first event should be
# outdated but not the second one.
self.reactor.advance(one_day_ms * 2 / 1000)


@@ -60,7 +60,7 @@ class PresenceTestCase(unittest.HomeserverTestCase):
def test_put_presence_disabled(self):
"""
-PUT to the status endpoint with use_presence disbled will NOT call
+PUT to the status endpoint with use_presence disabled will NOT call
set_state on the presence handler.
"""
self.hs.config.use_presence = False


@@ -99,7 +99,7 @@ class RelationsTestCase(unittest.HomeserverTestCase):
self.assertEquals(400, channel.code, channel.json_body)
def test_basic_paginate_relations(self):
"""Tests that calling pagination API corectly the latest relations.
"""Tests that calling pagination API correctly the latest relations.
"""
channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction")
self.assertEquals(200, channel.code, channel.json_body)


@@ -166,7 +166,7 @@ class TestMauLimit(unittest.HomeserverTestCase):
self.do_sync_for_user(token5)
self.do_sync_for_user(token6)
-# But old user cant
+# But old user can't
with self.assertRaises(SynapseError) as cm:
self.do_sync_for_user(token1)


@@ -124,7 +124,7 @@ class LoggingContextTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_make_deferred_yieldable(self):
-# a function which retuns an incomplete deferred, but doesn't follow
+# a function which returns an incomplete deferred, but doesn't follow
# the synapse rules.
def blocking_function():
d = defer.Deferred()
@@ -183,7 +183,7 @@ class LoggingContextTestCase(unittest.TestCase):
@defer.inlineCallbacks
def test_make_deferred_yieldable_with_await(self):
-# an async function which retuns an incomplete coroutine, but doesn't
+# an async function which returns an incomplete coroutine, but doesn't
# follow the synapse rules.
async def blocking_function():