Mirror of https://github.com/element-hq/synapse (synced 2024-06-23 19:33:29 +00:00)

Compare commits: 29 commits, e4cd0ea2bd ... 02ee634cb4
changelog.d/17281.feature (new file)
@@ -0,0 +1 @@
+Add `is_encrypted` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.
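To make the feature concrete, here is a rough sketch of a request a client might send once this lands. The unstable endpoint path, list name, and surrounding fields are assumptions based on MSC3575 rather than taken from this changeset; only `filters.is_encrypted` is the behaviour added here.

    # Hypothetical MSC3575 Sliding Sync request body (sketch, not from this changeset).
    # POST /_matrix/client/unstable/org.matrix.msc3575/sync
    sliding_sync_body = {
        "lists": {
            "encrypted-rooms": {  # list name is illustrative
                "ranges": [[0, 9]],
                "required_state": [["m.room.encryption", ""]],
                "timeline_limit": 1,
                # New: only return rooms that have (or, with False, lack)
                # m.room.encryption state at the sync position.
                "filters": {"is_encrypted": True},
            }
        }
    }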
changelog.d/17284.feature (new file)
@@ -0,0 +1 @@
+Do not require user-interactive authentication for uploading cross-signing keys for the first time, per MSC3967.
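In client terms, the very first `POST /_matrix/client/v3/keys/device_signing/upload` for an account can now omit the UIA `auth` object. Below is a minimal sketch of the client-visible behaviour; the homeserver URL, access token, and key payload are placeholders, and the exact failure mode for a later reset depends on server configuration (see the `SigningKeyUploadServlet` hunks further down).

    # Sketch only: illustrates the behaviour described above, not code from this changeset.
    import requests

    HS = "https://matrix.example.org"  # placeholder homeserver
    HEADERS = {"Authorization": "Bearer ACCESS_TOKEN"}  # placeholder token

    cross_signing_keys = {
        "master_key": {
            "user_id": "@alice:example.org",
            "usage": ["master"],
            "keys": {"ed25519:MASTERKEY": "MASTERKEY"},  # placeholder key material
        },
        # self_signing_key / user_signing_key omitted for brevity
    }

    # First-time setup: no user-interactive auth ("auth" dict) is attached, and
    # the upload is expected to return 200.
    resp = requests.post(
        f"{HS}/_matrix/client/v3/keys/device_signing/upload",
        json=cross_signing_keys,
        headers=HEADERS,
    )
    print(resp.status_code)  # expected 200 for first-time setup

    # Uploading *different* keys later counts as a reset and is still gated: it
    # triggers UIA (401 with flows) or, under MSC3861 delegation, a 501 pointing
    # at the account management URL.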
@@ -393,9 +393,6 @@ class ExperimentalConfig(Config):
         # MSC3391: Removing account data.
         self.msc3391_enabled = experimental.get("msc3391_enabled", False)
 
-        # MSC3967: Do not require UIA when first uploading cross signing keys
-        self.msc3967_enabled = experimental.get("msc3967_enabled", False)
-
         # MSC3861: Matrix architecture change to delegate authentication via OIDC
         try:
             self.msc3861 = MSC3861(**experimental.get("msc3861", {}))
@@ -22,10 +22,11 @@ from typing import TYPE_CHECKING, AbstractSet, Dict, List, Optional
 
 from immutabledict import immutabledict
 
-from synapse.api.constants import AccountDataTypes, Membership
+from synapse.api.constants import AccountDataTypes, EventTypes, Membership
 from synapse.events import EventBase
 from synapse.types import Requester, RoomStreamToken, StreamToken, UserID
 from synapse.types.handlers import OperationType, SlidingSyncConfig, SlidingSyncResult
+from synapse.types.state import StateFilter
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
@@ -57,6 +58,7 @@ class SlidingSyncHandler:
     def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()
         self.store = hs.get_datastores().main
+        self.storage_controllers = hs.get_storage_controllers()
         self.auth_blocking = hs.get_auth_blocking()
         self.notifier = hs.get_notifier()
         self.event_sources = hs.get_event_sources()
@@ -523,8 +525,26 @@ class SlidingSyncHandler:
         if filters.spaces:
             raise NotImplementedError()
 
-        if filters.is_encrypted:
-            raise NotImplementedError()
+        # Filter for encrypted rooms
+        if filters.is_encrypted is not None:
+            # Make a copy so we don't run into an error: `Set changed size during
+            # iteration`, when we filter out and remove items
+            for room_id in list(filtered_room_id_set):
+                state_at_to_token = await self.storage_controllers.state.get_state_at(
+                    room_id,
+                    to_token,
+                    state_filter=StateFilter.from_types(
+                        [(EventTypes.RoomEncryption, "")]
+                    ),
+                )
+                is_encrypted = state_at_to_token.get((EventTypes.RoomEncryption, ""))
+
+                # If we're looking for encrypted rooms, filter out rooms that are not
+                # encrypted and vice versa
+                if (filters.is_encrypted and not is_encrypted) or (
+                    not filters.is_encrypted and is_encrypted
+                ):
+                    filtered_room_id_set.remove(room_id)
 
         if filters.is_invite:
             raise NotImplementedError()
@@ -979,89 +979,6 @@ class SyncHandler:
                 bundled_aggregations=bundled_aggregations,
             )
 
-    async def get_state_after_event(
-        self,
-        event_id: str,
-        state_filter: Optional[StateFilter] = None,
-        await_full_state: bool = True,
-    ) -> StateMap[str]:
-        """
-        Get the room state after the given event
-
-        Args:
-            event_id: event of interest
-            state_filter: The state filter used to fetch state from the database.
-            await_full_state: if `True`, will block if we do not yet have complete state
-                at the event and `state_filter` is not satisfied by partial state.
-                Defaults to `True`.
-        """
-        state_ids = await self._state_storage_controller.get_state_ids_for_event(
-            event_id,
-            state_filter=state_filter or StateFilter.all(),
-            await_full_state=await_full_state,
-        )
-
-        # using get_metadata_for_events here (instead of get_event) sidesteps an issue
-        # with redactions: if `event_id` is a redaction event, and we don't have the
-        # original (possibly because it got purged), get_event will refuse to return
-        # the redaction event, which isn't terribly helpful here.
-        #
-        # (To be fair, in that case we could assume it's *not* a state event, and
-        # therefore we don't need to worry about it. But still, it seems cleaner just
-        # to pull the metadata.)
-        m = (await self.store.get_metadata_for_events([event_id]))[event_id]
-        if m.state_key is not None and m.rejection_reason is None:
-            state_ids = dict(state_ids)
-            state_ids[(m.event_type, m.state_key)] = event_id
-
-        return state_ids
-
-    async def get_state_at(
-        self,
-        room_id: str,
-        stream_position: StreamToken,
-        state_filter: Optional[StateFilter] = None,
-        await_full_state: bool = True,
-    ) -> StateMap[str]:
-        """Get the room state at a particular stream position
-
-        Args:
-            room_id: room for which to get state
-            stream_position: point at which to get state
-            state_filter: The state filter used to fetch state from the database.
-            await_full_state: if `True`, will block if we do not yet have complete state
-                at the last event in the room before `stream_position` and
-                `state_filter` is not satisfied by partial state. Defaults to `True`.
-        """
-        # FIXME: This gets the state at the latest event before the stream ordering,
-        # which might not be the same as the "current state" of the room at the time
-        # of the stream token if there were multiple forward extremities at the time.
-        last_event_id = await self.store.get_last_event_in_room_before_stream_ordering(
-            room_id,
-            end_token=stream_position.room_key,
-        )
-
-        if last_event_id:
-            state = await self.get_state_after_event(
-                last_event_id,
-                state_filter=state_filter or StateFilter.all(),
-                await_full_state=await_full_state,
-            )
-
-        else:
-            # no events in this room - so presumably no state
-            state = {}
-
-            # (erikj) This should be rarely hit, but we've had some reports that
-            # we get more state down gappy syncs than we should, so let's add
-            # some logging.
-            logger.info(
-                "Failed to find any events in room %s at %s",
-                room_id,
-                stream_position.room_key,
-            )
-        return state
-
     async def compute_summary(
         self,
         room_id: str,
@@ -1435,7 +1352,7 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False
 
-        state_at_timeline_end = await self.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -1563,7 +1480,7 @@ class SyncHandler:
         else:
             # We can get here if the user has ignored the senders of all
             # the recent events.
-            state_at_timeline_start = await self.get_state_at(
+            state_at_timeline_start = await self._state_storage_controller.get_state_at(
                 room_id,
                 stream_position=end_token,
                 state_filter=state_filter,
@@ -1585,14 +1502,14 @@ class SyncHandler:
             # about them).
             state_filter = StateFilter.all()
 
-        state_at_previous_sync = await self.get_state_at(
+        state_at_previous_sync = await self._state_storage_controller.get_state_at(
             room_id,
             stream_position=since_token,
             state_filter=state_filter,
             await_full_state=await_full_state,
         )
 
-        state_at_timeline_end = await self.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -2591,7 +2508,7 @@ class SyncHandler:
                 continue
 
             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self.get_state_at(
+                old_state_ids = await self._state_storage_controller.get_state_at(
                     room_id,
                     since_token,
                     state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2621,12 +2538,14 @@ class SyncHandler:
                     newly_left_rooms.append(room_id)
             else:
                 if not old_state_ids:
-                    old_state_ids = await self.get_state_at(
-                        room_id,
-                        since_token,
-                        state_filter=StateFilter.from_types(
-                            [(EventTypes.Member, user_id)]
-                        ),
-                    )
+                    old_state_ids = (
+                        await self._state_storage_controller.get_state_at(
+                            room_id,
+                            since_token,
+                            state_filter=StateFilter.from_types(
+                                [(EventTypes.Member, user_id)]
+                            ),
+                        )
+                    )
                     old_mem_ev_id = old_state_ids.get(
                         (EventTypes.Member, user_id), None
@@ -41,7 +41,6 @@ class ExperimentalFeature(str, Enum):
 
     MSC3026 = "msc3026"
     MSC3881 = "msc3881"
-    MSC3967 = "msc3967"
 
 
 class ExperimentalFeaturesRestServlet(RestServlet):
@@ -382,44 +382,35 @@ class SigningKeyUploadServlet(RestServlet):
             master_key_updatable_without_uia,
         ) = await self.e2e_keys_handler.check_cross_signing_setup(user_id)
 
-        # Before MSC3967 we required UIA both when setting up cross signing for the
-        # first time and when resetting the device signing key. With MSC3967 we only
-        # require UIA when resetting cross-signing, and not when setting up the first
-        # time. Because there is no UIA in MSC3861, for now we throw an error if the
-        # user tries to reset the device signing key when MSC3861 is enabled, but allow
-        # first-time setup.
-        if self.hs.config.experimental.msc3861.enabled:
-            # The auth service has to explicitly mark the master key as replaceable
-            # without UIA to reset the device signing key with MSC3861.
-            if is_cross_signing_setup and not master_key_updatable_without_uia:
-                config = self.hs.config.experimental.msc3861
-                if config.account_management_url is not None:
-                    url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
-                else:
-                    url = config.issuer
-
-                raise SynapseError(
-                    HTTPStatus.NOT_IMPLEMENTED,
-                    "To reset your end-to-end encryption cross-signing identity, "
-                    f"you first need to approve it at {url} and then try again.",
-                    Codes.UNRECOGNIZED,
-                )
-            # But first-time setup is fine
-
-        elif self.hs.config.experimental.msc3967_enabled:
-            # MSC3967 allows this endpoint to 200 OK for idempotency. Resending exactly the same
-            # keys should just 200 OK without doing a UIA prompt.
-            keys_are_different = await self.e2e_keys_handler.has_different_keys(
-                user_id, body
-            )
-            if not keys_are_different:
-                # FIXME: we do not fallthrough to upload_signing_keys_for_user because confusingly
-                # if we do, we 500 as it looks like it tries to INSERT the same key twice, causing a
-                # unique key constraint violation. This sounds like a bug?
-                return 200, {}
-            # the keys are different, is x-signing set up? If no, then the keys don't exist which is
-            # why they are different. If yes, then we need to UIA to change them.
-            if is_cross_signing_setup:
+        # Resending exactly the same keys should just 200 OK without doing a UIA prompt.
+        keys_are_different = await self.e2e_keys_handler.has_different_keys(
+            user_id, body
+        )
+        if not keys_are_different:
+            return 200, {}
+
+        # The keys are different; is x-signing set up? If no, then this is first-time
+        # setup, and that is allowed without UIA, per MSC3967.
+        # If yes, then we need to authenticate the change.
+        if is_cross_signing_setup:
+            # With MSC3861, UIA is not possible. Instead, the auth service has to
+            # explicitly mark the master key as replaceable.
+            if self.hs.config.experimental.msc3861.enabled:
+                if not master_key_updatable_without_uia:
+                    config = self.hs.config.experimental.msc3861
+                    if config.account_management_url is not None:
+                        url = f"{config.account_management_url}?action=org.matrix.cross_signing_reset"
+                    else:
+                        url = config.issuer
+
+                    raise SynapseError(
+                        HTTPStatus.NOT_IMPLEMENTED,
+                        "To reset your end-to-end encryption cross-signing identity, "
+                        f"you first need to approve it at {url} and then try again.",
+                        Codes.UNRECOGNIZED,
+                    )
+            else:
+                # Without MSC3861, we require UIA.
                 await self.auth_handler.validate_user_via_ui_auth(
                     requester,
                     request,
@@ -428,18 +419,6 @@ class SigningKeyUploadServlet(RestServlet):
                     # Do not allow skipping of UIA auth.
                     can_skip_ui_auth=False,
                 )
-            # Otherwise we don't require UIA since we are setting up cross signing for first time
-        else:
-            # Previous behaviour is to always require UIA but allow it to be skipped
-            await self.auth_handler.validate_user_via_ui_auth(
-                requester,
-                request,
-                body,
-                "add a device signing key to your account",
-                # Allow skipping of UI auth since this is frequently called directly
-                # after login and it is silly to ask users to re-auth immediately.
-                can_skip_ui_auth=True,
-            )
 
         result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body)
         return 200, result
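Taken together, the two hunks above reduce to roughly the following decision flow. This is a runnable paraphrase for readability, not the servlet code itself; the function name, parameters, and return strings are simplified placeholders.

    # Paraphrased summary (assumption-labelled) of the new upload flow.
    def decide_upload_action(
        keys_are_different: bool,
        is_cross_signing_setup: bool,
        msc3861_enabled: bool,
        master_key_updatable_without_uia: bool,
    ) -> str:
        if not keys_are_different:
            return "200: identical keys re-uploaded, nothing to do"
        if is_cross_signing_setup:
            if msc3861_enabled:
                if not master_key_updatable_without_uia:
                    return "501: reset must first be approved via the auth service"
                return "200: auth service marked the master key as replaceable"
            return "UIA required: prompt the user, then accept the reset"
        return "200: first-time setup, no UIA required (MSC3967, now the default)"

    # Example: a brand-new account uploading cross-signing keys for the first time.
    print(decide_upload_action(True, False, False, False))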
@@ -45,7 +45,7 @@ from synapse.storage.util.partial_state_events_tracker import (
     PartialStateEventsTracker,
 )
 from synapse.synapse_rust.acl import ServerAclEvaluator
-from synapse.types import MutableStateMap, StateMap, get_domain_from_id
+from synapse.types import MutableStateMap, StateMap, StreamToken, get_domain_from_id
 from synapse.types.state import StateFilter
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
@@ -372,6 +372,91 @@ class StateStorageController:
         )
         return state_map[event_id]
 
+    async def get_state_after_event(
+        self,
+        event_id: str,
+        state_filter: Optional[StateFilter] = None,
+        await_full_state: bool = True,
+    ) -> StateMap[str]:
+        """
+        Get the room state after the given event
+
+        Args:
+            event_id: event of interest
+            state_filter: The state filter used to fetch state from the database.
+            await_full_state: if `True`, will block if we do not yet have complete state
+                at the event and `state_filter` is not satisfied by partial state.
+                Defaults to `True`.
+        """
+        state_ids = await self.get_state_ids_for_event(
+            event_id,
+            state_filter=state_filter or StateFilter.all(),
+            await_full_state=await_full_state,
+        )
+
+        # using get_metadata_for_events here (instead of get_event) sidesteps an issue
+        # with redactions: if `event_id` is a redaction event, and we don't have the
+        # original (possibly because it got purged), get_event will refuse to return
+        # the redaction event, which isn't terribly helpful here.
+        #
+        # (To be fair, in that case we could assume it's *not* a state event, and
+        # therefore we don't need to worry about it. But still, it seems cleaner just
+        # to pull the metadata.)
+        m = (await self.stores.main.get_metadata_for_events([event_id]))[event_id]
+        if m.state_key is not None and m.rejection_reason is None:
+            state_ids = dict(state_ids)
+            state_ids[(m.event_type, m.state_key)] = event_id
+
+        return state_ids
+
+    async def get_state_at(
+        self,
+        room_id: str,
+        stream_position: StreamToken,
+        state_filter: Optional[StateFilter] = None,
+        await_full_state: bool = True,
+    ) -> StateMap[str]:
+        """Get the room state at a particular stream position
+
+        Args:
+            room_id: room for which to get state
+            stream_position: point at which to get state
+            state_filter: The state filter used to fetch state from the database.
+            await_full_state: if `True`, will block if we do not yet have complete state
+                at the last event in the room before `stream_position` and
+                `state_filter` is not satisfied by partial state. Defaults to `True`.
+        """
+        # FIXME: This gets the state at the latest event before the stream ordering,
+        # which might not be the same as the "current state" of the room at the time
+        # of the stream token if there were multiple forward extremities at the time.
+        last_event_id = (
+            await self.stores.main.get_last_event_in_room_before_stream_ordering(
+                room_id,
+                end_token=stream_position.room_key,
+            )
+        )
+
+        if last_event_id:
+            state = await self.get_state_after_event(
+                last_event_id,
+                state_filter=state_filter or StateFilter.all(),
+                await_full_state=await_full_state,
+            )
+
+        else:
+            # no events in this room - so presumably no state
+            state = {}
+
+            # (erikj) This should be rarely hit, but we've had some reports that
+            # we get more state down gappy syncs than we should, so let's add
+            # some logging.
+            logger.info(
+                "Failed to find any events in room %s at %s",
+                room_id,
+                stream_position.room_key,
+            )
+        return state
+
     @trace
     @tag_args
     async def get_state_for_groups(
@@ -541,6 +541,8 @@ class MSC3861OAuthDelegation(HomeserverTestCase):
 
         self.assertEqual(channel.code, 200, channel.json_body)
 
+        # Try uploading *different* keys; it should cause a 501 error.
+        keys_upload_body = self.make_device_keys(USER_ID, DEVICE)
         channel = self.make_request(
            "POST",
            "/_matrix/client/v3/keys/device_signing/upload",
@@ -1244,3 +1244,60 @@ class FilterRoomsTestCase(HomeserverTestCase):
         )
 
         self.assertEqual(falsy_filtered_room_ids, {room_id})
+
+    def test_filter_encrypted_rooms(self) -> None:
+        """
+        Test `filter.is_encrypted` for encrypted rooms
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a normal room
+        room_id = self.helper.create_room_as(
+            user1_id,
+            is_public=False,
+            tok=user1_tok,
+        )
+
+        # Create an encrypted room
+        encrypted_room_id = self.helper.create_room_as(
+            user1_id,
+            is_public=False,
+            tok=user1_tok,
+        )
+        self.helper.send_state(
+            encrypted_room_id,
+            EventTypes.RoomEncryption,
+            {"algorithm": "m.megolm.v1.aes-sha2"},
+            tok=user1_tok,
+        )
+
+        after_rooms_token = self.event_sources.get_current_token()
+
+        # Try with `is_encrypted=True`
+        truthy_filtered_room_ids = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                {room_id, encrypted_room_id},
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    is_encrypted=True,
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(truthy_filtered_room_ids, {encrypted_room_id})
+
+        # Try with `is_encrypted=False`
+        falsy_filtered_room_ids = self.get_success(
+            self.sliding_sync_handler.filter_rooms(
+                UserID.from_string(user1_id),
+                {room_id, encrypted_room_id},
+                SlidingSyncConfig.SlidingSyncList.Filters(
+                    is_encrypted=False,
+                ),
+                after_rooms_token,
+            )
+        )
+
+        self.assertEqual(falsy_filtered_room_ids, {room_id})
@@ -435,10 +435,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
             True,
             channel.json_body["features"]["msc3881"],
         )
-        self.assertEqual(
-            False,
-            channel.json_body["features"]["msc3967"],
-        )
 
         # test nothing blows up if you try to disable a feature that isn't already enabled
         url = f"{self.url}/{self.other_user}"
@@ -155,71 +155,6 @@ class KeyQueryTestCase(unittest.HomeserverTestCase):
         }
 
-    def test_device_signing_with_uia(self) -> None:
-        """Device signing key upload requires UIA."""
-        password = "wonderland"
-        device_id = "ABCDEFGHI"
-        alice_id = self.register_user("alice", password)
-        alice_token = self.login("alice", password, device_id=device_id)
-
-        content = self.make_device_keys(alice_id, device_id)
-
-        channel = self.make_request(
-            "POST",
-            "/_matrix/client/v3/keys/device_signing/upload",
-            content,
-            alice_token,
-        )
-
-        self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result)
-        # Grab the session
-        session = channel.json_body["session"]
-        # Ensure that flows are what is expected.
-        self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"])
-
-        # add UI auth
-        content["auth"] = {
-            "type": "m.login.password",
-            "identifier": {"type": "m.id.user", "user": alice_id},
-            "password": password,
-            "session": session,
-        }
-
-        channel = self.make_request(
-            "POST",
-            "/_matrix/client/v3/keys/device_signing/upload",
-            content,
-            alice_token,
-        )
-
-        self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
-
-    @override_config({"ui_auth": {"session_timeout": "15m"}})
-    def test_device_signing_with_uia_session_timeout(self) -> None:
-        """Device signing key upload requires UIA buy passes with grace period."""
-        password = "wonderland"
-        device_id = "ABCDEFGHI"
-        alice_id = self.register_user("alice", password)
-        alice_token = self.login("alice", password, device_id=device_id)
-
-        content = self.make_device_keys(alice_id, device_id)
-
-        channel = self.make_request(
-            "POST",
-            "/_matrix/client/v3/keys/device_signing/upload",
-            content,
-            alice_token,
-        )
-
-        self.assertEqual(channel.code, HTTPStatus.OK, channel.result)
-
-    @override_config(
-        {
-            "experimental_features": {"msc3967_enabled": True},
-            "ui_auth": {"session_timeout": "15s"},
-        }
-    )
     def test_device_signing_with_msc3967(self) -> None:
-        """Device signing key follows MSC3967 behaviour when enabled."""
         password = "wonderland"
         device_id = "ABCDEFGHI"
         alice_id = self.register_user("alice", password)