[ENG-1318, ENG-1199, ENG-931, ENG-1046] Ability to disable mDNS discovery (#1620)

* let me create pr

* a whole lotta changes

* split `p2p_manager.rs` into smaller files

* the arcpocalypse is over

* minor generic cleanup

* wip removing 'MetadataManager'

* more wip

* wip: i am changing branch

* discovery2 -> discovery

* make it somewhat compile

* more wip

* wip: reassembling manager stream

* state more goodly

* wip

* more wip

* removing generic from sd_p2p::Manager

* reassemble networked libraries

* wip: hooking back up mDNS

* multi-flume wip

* contain bad code to a single file

* p2p_manager_actor + split handlers into file per operation

* cleanup after restructure

* cleaning up more

* wip: reenable resync

* wip: remote identity in connection payload

* track connected clients (required for `service.rs`)

* a big ass iterator

* working towards finishing `service.rs`

* service shutdown

* hook up listen channel in service

* fix address resolution

* merge nlm stuff into LibrariesService

* finish library to service mapping

* less footguns in p2p - seal `PeerId`

* fix previous pr

* p2p state rspc query

* send node events to the frontend

* minor

* wip

* more worky, less crashy

* make spacedrop work + debug state

* fix mdns expiry

* clippy

* other clippy

* remove old tests

* add tests back

---------

Co-authored-by: Brendan Allan <brendonovich@outlook.com>
Oscar Beaumont, 2023-10-30 19:24:04 +11:00 (committed by GitHub)
parent 8e7e8cd0e9
commit 02775921ef
51 changed files with 2645 additions and 2011 deletions

Cargo.lock (generated)

@ -519,9 +519,9 @@ checksum = "0ea22880d78093b0cbe17c89f64a7d457941e65759157ec6cb31a31d652b05e5"
[[package]]
name = "base64"
version = "0.21.4"
version = "0.21.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2"
checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9"
[[package]]
name = "base64ct"
@ -2185,9 +2185,9 @@ dependencies = [
[[package]]
name = "futures"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40"
checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
dependencies = [
"futures-channel",
"futures-core",
@ -2200,9 +2200,9 @@ dependencies = [
[[package]]
name = "futures-channel"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2"
checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
dependencies = [
"futures-core",
"futures-sink",
@ -2223,15 +2223,15 @@ dependencies = [
[[package]]
name = "futures-core"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
[[package]]
name = "futures-executor"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0"
checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"
dependencies = [
"futures-core",
"futures-task",
@ -2241,9 +2241,9 @@ dependencies = [
[[package]]
name = "futures-io"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa"
[[package]]
name = "futures-lite"
@ -2273,9 +2273,9 @@ dependencies = [
[[package]]
name = "futures-macro"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb"
dependencies = [
"proc-macro2",
"quote",
@ -2294,15 +2294,15 @@ dependencies = [
[[package]]
name = "futures-sink"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e"
checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
[[package]]
name = "futures-task"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2"
[[package]]
name = "futures-ticker"
@ -2323,9 +2323,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
[[package]]
name = "futures-util"
version = "0.3.28"
version = "0.3.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
dependencies = [
"futures-channel",
"futures-core",
@ -2745,7 +2745,7 @@ version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"bytes",
"headers-core",
"http",
@ -3674,7 +3674,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d157562dba6017193e5285acf6b1054759e83540bfd79f75b69d6ce774c88da"
dependencies = [
"asynchronous-codec",
"base64 0.21.4",
"base64 0.21.5",
"byteorder",
"bytes",
"either",
@ -5371,7 +5371,7 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bdc0001cfea3db57a2e24bc0d818e9e20e554b5f97fabb9bc231dc240269ae06"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"indexmap 1.9.3",
"line-wrap",
"quick-xml",
@ -6234,7 +6234,7 @@ version = "0.11.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"bytes",
"encoding_rs",
"futures-core",
@ -6680,7 +6680,7 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
"base64 0.21.4",
"base64 0.21.5",
"blake3",
"bytes",
"chrono",
@ -6732,6 +6732,7 @@ dependencies = [
"slotmap",
"specta",
"static_assertions",
"streamunordered",
"strum",
"strum_macros",
"sysinfo",
@ -6945,19 +6946,23 @@ name = "sd-p2p"
version = "0.1.0"
dependencies = [
"arc-swap",
"base64 0.21.5",
"ed25519-dalek",
"flume",
"hex",
"futures-core",
"if-watch",
"libp2p",
"libp2p-quic",
"mdns-sd",
"pin-project-lite",
"rand_core 0.6.4",
"rmp-serde",
"serde",
"specta",
"streamunordered",
"thiserror",
"tokio",
"tokio-stream",
"tokio-util",
"tracing 0.2.0",
"tracing-subscriber 0.3.0",
@ -7202,7 +7207,7 @@ version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"chrono",
"hex",
"indexmap 1.9.3",
@ -7661,6 +7666,18 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "streamunordered"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96b49452854e4b797cc320ffc3d88d88a08da8cd675f01958f650a14066f9a16"
dependencies = [
"futures-core",
"futures-sink",
"futures-util",
"slab",
]
[[package]]
name = "strict-num"
version = "0.1.1"
@ -7746,7 +7763,7 @@ version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bbdb58577b6301f8d17ae2561f32002a5bae056d444e0f69e611e504a276204"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"serde",
"serde_json",
]
@ -7960,7 +7977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9bfe673cf125ef364d6f56b15e8ce7537d9ca7e4dae1cf6fbbdeed2e024db3d9"
dependencies = [
"anyhow",
"base64 0.21.4",
"base64 0.21.5",
"bytes",
"cocoa",
"dirs-next",
@ -8036,7 +8053,7 @@ version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7b3475e55acec0b4a50fb96435f19631fb58cbcd31923e1a213de5c382536bbb"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"brotli",
"ico",
"json-patch",
@ -8376,6 +8393,7 @@ dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
"tokio-util",
]
[[package]]
@ -8937,7 +8955,7 @@ version = "0.36.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c51daa774fe9ee5efcf7b4fec13019b8119cda764d9a8b5b06df02bb1445c656"
dependencies = [
"base64 0.21.4",
"base64 0.21.5",
"log",
"pico-args",
"usvg-parser",

core/Cargo.toml

@ -111,6 +111,7 @@ pin-project-lite = "0.2.13"
bytes = "1.5.0"
reqwest = { version = "0.11.20", features = ["json", "native-tls-vendored"] }
directories = "5.0.1"
streamunordered = "0.5.3"
# Override features of transitive dependencies
[dependencies.openssl]

core/src/api/p2p.rs

@ -1,11 +1,11 @@
use rspc::{alpha::AlphaRouter, ErrorCode};
use sd_p2p::PeerId;
use sd_p2p::spacetunnel::RemoteIdentity;
use serde::Deserialize;
use specta::Type;
use std::path::PathBuf;
use uuid::Uuid;
use crate::p2p::{P2PEvent, PairingDecision};
use crate::p2p::{operations, P2PEvent, PairingDecision};
use super::{Ctx, R};
@ -16,19 +16,18 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
let mut rx = node.p2p.subscribe();
async_stream::stream! {
// TODO: Don't block subscription start
for peer in node.p2p.manager.get_discovered_peers().await {
yield P2PEvent::DiscoveredPeer {
peer_id: peer.peer_id,
for peer in node.p2p.node.get_discovered() {
yield P2PEvent::DiscoveredPeer {
identity: peer.identity,
metadata: peer.metadata,
};
}
// TODO: Don't block subscription start
#[allow(clippy::unwrap_used)] // TODO: P2P isn't stable yet lol
for peer_id in node.p2p.manager.get_connected_peers().await.unwrap() {
for identity in node.p2p.manager.get_connected_peers().await.unwrap() {
yield P2PEvent::ConnectedPeer {
peer_id,
identity,
};
}
@ -38,30 +37,29 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
}
})
})
.procedure("nlmState", {
R.query(|node, _: ()| async move { node.nlm.state().await })
.procedure("state", {
R.query(|node, _: ()| async move { node.p2p.state() })
})
.procedure("spacedrop", {
#[derive(Type, Deserialize)]
pub struct SpacedropArgs {
peer_id: PeerId,
identity: RemoteIdentity,
file_path: Vec<String>,
}
R.mutation(|node, args: SpacedropArgs| async move {
node.p2p
.clone()
.spacedrop(
args.peer_id,
args.file_path
.into_iter()
.map(PathBuf::from)
.collect::<Vec<_>>(),
)
.await
.map_err(|_err| {
rspc::Error::new(ErrorCode::InternalServerError, "todo: error".into())
})
operations::spacedrop(
node.p2p.clone(),
args.identity,
args.file_path
.into_iter()
.map(PathBuf::from)
.collect::<Vec<_>>(),
)
.await
.map_err(|_err| {
rspc::Error::new(ErrorCode::InternalServerError, "todo: error".into())
})
})
})
.procedure("acceptSpacedrop", {
@ -76,7 +74,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
R.mutation(|node, id: Uuid| async move { node.p2p.cancel_spacedrop(id).await })
})
.procedure("pair", {
R.mutation(|node, id: PeerId| async move {
R.mutation(|node, id: RemoteIdentity| async move {
node.p2p.pairing.clone().originator(id, node).await
})
})

core/src/custom_uri.rs

@ -3,7 +3,7 @@ use crate::{
library::Library,
location::file_path_helper::{file_path_to_handle_custom_uri, IsolatedFilePathData},
object::media::thumbnail::WEBP_EXTENSION,
p2p::{sync::InstanceState, IdentityOrRemoteIdentity},
p2p::{operations, IdentityOrRemoteIdentity},
prisma::{file_path, location},
util::{db::*, InfallibleResponse},
Node,
@ -240,36 +240,26 @@ pub fn router(node: Arc<Node>) -> Router<()> {
}
// TODO: Support `Range` requests and `ETag` headers
#[allow(clippy::unwrap_used)]
match *state
.node
.nlm
.state()
.await
.get(&library.id)
.unwrap()
.instances
.get(&identity)
.unwrap()
{
InstanceState::Discovered(_) | InstanceState::Unavailable => {
Ok(not_found(()))
}
InstanceState::Connected(peer_id) => {
#[allow(clippy::unwrap_used)] // TODO: Error handling needed
match state.node.p2p.get_library_service(&library.id) {
Some(service) => {
let stream = service
.connect(state.node.p2p.manager.clone(), &identity)
.await
.unwrap();
let (tx, mut rx) =
tokio::sync::mpsc::channel::<io::Result<Bytes>>(150);
// TODO: We only start a thread because of stupid `ManagerStreamAction2` and libp2p's `!Send/!Sync` bounds on a stream.
let node = state.node.clone();
tokio::spawn(async move {
node.p2p
.request_file(
peer_id,
&library,
file_path_pub_id,
Range::Full,
MpscToAsyncWrite::new(PollSender::new(tx)),
)
.await;
operations::request_file(
stream,
&library,
file_path_pub_id,
Range::Full,
MpscToAsyncWrite::new(PollSender::new(tx)),
)
.await;
});
// TODO: Content Type
@ -281,6 +271,7 @@ pub fn router(node: Arc<Node>) -> Router<()> {
})),
))
}
None => Ok(not_found(())),
}
}
}

core/src/lib.rs

@ -4,7 +4,6 @@ use crate::{
api::{CoreEvent, Router},
location::LocationManagerError,
object::media::thumbnail::actor::Thumbnailer,
p2p::sync::NetworkedLibraries,
};
use api::notifications::{Notification, NotificationData, NotificationId};
@ -65,7 +64,6 @@ pub struct Node {
pub p2p: Arc<p2p::P2PManager>,
pub event_bus: (broadcast::Sender<CoreEvent>, broadcast::Receiver<CoreEvent>),
pub notifications: Notifications,
pub nlm: Arc<NetworkedLibraries>,
pub thumbnailer: Thumbnailer,
pub files_over_p2p_flag: Arc<AtomicBool>,
pub env: env::Env,
@ -100,16 +98,14 @@ impl Node {
.await
.map_err(NodeError::FailedToInitializeConfig)?;
let (p2p, p2p_stream) = p2p::P2PManager::new(config.clone()).await?;
let (locations, locations_actor) = location::Locations::new();
let (jobs, jobs_actor) = job::Jobs::new();
let libraries = library::Libraries::new(data_dir.join("libraries")).await?;
let (p2p, p2p_actor) = p2p::P2PManager::new(config.clone(), libraries.clone()).await?;
let node = Arc::new(Node {
data_dir: data_dir.to_path_buf(),
jobs,
locations,
nlm: NetworkedLibraries::new(p2p.clone(), &libraries),
notifications: notifications::Notifications::new(),
p2p,
config,
@ -141,7 +137,7 @@ impl Node {
locations_actor.start(node.clone());
node.libraries.init(&node).await?;
jobs_actor.start(node.clone());
node.p2p.start(p2p_stream, node.clone());
p2p_actor.start(node.clone());
let router = api::mount();

core/src/library/manager/mod.rs

@ -457,7 +457,7 @@ impl Libraries {
InvalidateOperationEvent::all(),
)),
SyncMessage::Created => {
p2p::sync::originator(id, &library.sync, &node.nlm, &node.p2p).await
p2p::sync::originator(id, &library.sync, &node.p2p).await
}
}
}

core/src/location/mod.rs

@ -549,7 +549,7 @@ pub async fn relink_location(
.ok_or_else(|| NonUtf8PathError(location_path.into()))?;
sync.write_op(
&db,
db,
sync.shared_update(
prisma_sync::location::SyncId {
pub_id: pub_id.clone(),

core/src/p2p/identity_or_remote_identity.rs

@ -22,7 +22,7 @@ impl IdentityOrRemoteIdentity {
match self {
Self::Identity(identity) => identity.to_remote_identity(),
Self::RemoteIdentity(identity) => {
RemoteIdentity::from_bytes(identity.to_bytes().as_slice()).expect("unreachable")
RemoteIdentity::from_bytes(identity.get_bytes().as_slice()).expect("unreachable")
}
}
}
@ -42,7 +42,7 @@ impl IdentityOrRemoteIdentity {
pub fn to_bytes(&self) -> Vec<u8> {
match self {
Self::Identity(identity) => [&[b'I'], &*identity.to_bytes()].concat(),
Self::RemoteIdentity(identity) => [[b'R'].as_slice(), &identity.to_bytes()].concat(),
Self::RemoteIdentity(identity) => [[b'R'].as_slice(), &identity.get_bytes()].concat(),
}
}
}
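For context, the layout produced by `to_bytes` above is one tag byte, `b'I'` for a locally held identity (full keypair) or `b'R'` for a remote public identity, followed by the raw key bytes. A minimal sketch of the matching decode branch, assuming only what this hunk shows:

```rust
// Sketch (not part of this diff): classify a stored identity blob by its tag byte.
fn identity_kind(bytes: &[u8]) -> Result<&'static str, String> {
    match bytes.first() {
        Some(b'I') => Ok("local identity (full keypair)"),
        Some(b'R') => Ok("remote identity (public key only)"),
        _ => Err("invalid or empty identity blob".into()),
    }
}
```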

core/src/p2p/libraries.rs (new file)

@ -0,0 +1,139 @@
use std::{
collections::HashMap,
fmt,
sync::{Arc, PoisonError, RwLock},
};
use sd_p2p::Service;
use tokio::sync::mpsc;
use tracing::{error, warn};
use uuid::Uuid;
use crate::library::{Libraries, Library, LibraryManagerEvent};
use super::{IdentityOrRemoteIdentity, LibraryMetadata, P2PManager};
pub struct LibraryServices {
services: RwLock<HashMap<Uuid, Arc<Service<LibraryMetadata>>>>,
register_service_tx: mpsc::Sender<Arc<Service<LibraryMetadata>>>,
}
impl fmt::Debug for LibraryServices {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LibraryServices")
.field("services", &self.services.read().unwrap().keys())
.finish()
}
}
impl LibraryServices {
pub fn new(register_service_tx: mpsc::Sender<Arc<Service<LibraryMetadata>>>) -> Self {
Self {
services: Default::default(),
register_service_tx,
}
}
pub(crate) async fn start(manager: Arc<P2PManager>, libraries: Arc<Libraries>) {
if let Err(err) = libraries
.rx
.clone()
.subscribe(|msg| {
let manager = manager.clone();
async move {
match msg {
LibraryManagerEvent::InstancesModified(library)
| LibraryManagerEvent::Load(library) => {
manager
.clone()
.libraries
.load_library(manager, &library)
.await
}
LibraryManagerEvent::Edit(library) => {
manager.libraries.edit_library(&library).await
}
LibraryManagerEvent::Delete(library) => {
manager.libraries.delete_library(&library).await
}
}
}
})
.await
{
error!("Core may become unstable! `LibraryServices::start` manager aborted with error: {err:?}");
}
}
pub fn get(&self, id: &Uuid) -> Option<Arc<Service<LibraryMetadata>>> {
self.services
.read()
.unwrap_or_else(PoisonError::into_inner)
.get(id)
.cloned()
}
pub fn libraries(&self) -> Vec<(Uuid, Arc<Service<LibraryMetadata>>)> {
self.services
.read()
.unwrap_or_else(PoisonError::into_inner)
.iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>()
}
pub(crate) async fn load_library(&self, manager: Arc<P2PManager>, library: &Library) {
let identities = library
.db
.instance()
.find_many(vec![])
.exec()
.await
.unwrap()
.into_iter()
.filter_map(
// TODO: Error handling
|i| match IdentityOrRemoteIdentity::from_bytes(&i.identity).unwrap() {
IdentityOrRemoteIdentity::Identity(_) => None,
IdentityOrRemoteIdentity::RemoteIdentity(identity) => Some(identity),
},
)
.collect();
let mut inserted = false;
let service = {
let mut service = self
.services
.write()
.unwrap_or_else(PoisonError::into_inner);
let service = service.entry(library.id).or_insert_with(|| {
inserted = true;
Arc::new(Service::new(library.id.to_string(), manager.manager.clone()).unwrap())
});
service.add_known(identities);
service.clone()
};
if inserted {
service.update(LibraryMetadata {});
if self.register_service_tx.send(service).await.is_err() {
warn!("error sending on 'register_service_tx'. This indicates a bug!");
}
}
}
pub(crate) async fn edit_library(&self, _library: &Library) {
// TODO: Send changes to all connected nodes!
// TODO: Update mdns
}
pub(crate) async fn delete_library(&self, library: &Library) {
drop(
self.services
.write()
.unwrap_or_else(PoisonError::into_inner)
.remove(&library.id),
);
}
}
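These per-library services are what the rest of this PR dials through. A minimal sketch of the lookup-then-connect flow, mirroring the `custom_uri` hunk above (crate-internal APIs from this diff; error handling elided):

```rust
// Sketch: resolve a library's Service, then open a unicast stream to a known instance.
async fn connect_to_instance(
    p2p: &P2PManager,
    library_id: uuid::Uuid,
    identity: &sd_p2p::spacetunnel::RemoteIdentity,
) -> Option<sd_p2p::spacetime::UnicastStream> {
    let service = p2p.get_library_service(&library_id)?; // None if the library isn't loaded
    service
        .connect(p2p.manager.clone(), identity) // dials using the service's discovery state
        .await
        .ok()
}
```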

core/src/p2p/library_metadata.rs (new file)

@ -0,0 +1,21 @@
use std::collections::HashMap;
use sd_p2p::Metadata;
use serde::{Deserialize, Serialize};
use specta::Type;
#[derive(Debug, Clone, Type, Serialize, Deserialize)]
pub struct LibraryMetadata {}
impl Metadata for LibraryMetadata {
fn to_hashmap(self) -> HashMap<String, String> {
HashMap::with_capacity(0)
}
fn from_hashmap(_: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized,
{
Ok(Self {})
}
}
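`LibraryMetadata` carries no fields yet, so its `Metadata` impl is trivial. For a type that does carry fields, the same trait maps to and from the string key/value records advertised during discovery. A hypothetical sketch (this type is not in the PR):

```rust
use std::collections::HashMap;

use sd_p2p::Metadata;

// Hypothetical field-carrying metadata type, showing the round-trip.
struct ExampleMetadata {
    name: String,
}

impl Metadata for ExampleMetadata {
    fn to_hashmap(self) -> HashMap<String, String> {
        HashMap::from([("name".to_string(), self.name)])
    }

    fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String> {
        Ok(Self {
            name: data.get("name").ok_or("missing 'name'")?.clone(),
        })
    }
}
```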

core/src/p2p/mod.rs

@ -3,14 +3,23 @@
#![allow(clippy::unnecessary_cast)] // Yeah they aren't necessary on this arch, but they are on others
mod identity_or_remote_identity;
mod libraries;
mod library_metadata;
pub mod operations;
mod p2p_events;
mod p2p_manager;
mod p2p_manager_actor;
mod pairing;
mod peer_metadata;
mod protocol;
pub mod sync;
pub use identity_or_remote_identity::*;
pub use libraries::*;
pub use library_metadata::*;
pub use p2p_events::*;
pub use p2p_manager::*;
pub use p2p_manager_actor::*;
pub use pairing::*;
pub use peer_metadata::*;
pub use protocol::*;

core/src/p2p/operations/mod.rs (new file)

@ -0,0 +1,7 @@
pub mod ping;
pub mod request_file;
pub mod spacedrop;
pub use ping::ping;
pub use request_file::request_file;
pub use spacedrop::spacedrop;

core/src/p2p/operations/ping.rs (new file)

@ -0,0 +1,15 @@
use std::sync::Arc;
use sd_p2p::{spacetime::UnicastStream, PeerMessageEvent};
use tracing::debug;
use crate::p2p::{Header, P2PManager};
/// Send a ping to all peers we are connected to
pub async fn ping(p2p: Arc<P2PManager>) {
p2p.manager.broadcast(Header::Ping.to_bytes()).await;
}
pub(crate) async fn reciever(event: PeerMessageEvent<UnicastStream>) {
debug!("Received ping from peer '{}'", event.identity);
}

core/src/p2p/operations/request_file.rs (new file)

@ -0,0 +1,154 @@
use std::{
path::Path,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use sd_p2p::{
spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer},
spacetime::UnicastStream,
PeerMessageEvent,
};
use sd_prisma::prisma::file_path;
use tokio::{
fs::File,
io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
};
use tracing::debug;
use uuid::Uuid;
use crate::{
library::Library,
location::file_path_helper::{file_path_to_handle_p2p_serve_file, IsolatedFilePathData},
p2p::{Header, HeaderFile},
Node,
};
/// Request a file from the remote machine over P2P. This is used for preview media and quick preview.
///
/// DO NOT USE THIS WITHOUT `node.files_over_p2p_flag == true`
pub async fn request_file(
mut stream: UnicastStream,
library: &Library,
file_path_id: Uuid,
range: Range,
output: impl AsyncWrite + Unpin,
) {
let id = Uuid::new_v4();
// TODO: Tunnel for encryption + authentication
stream
.write_all(
&Header::File(HeaderFile {
id,
library_id: library.id,
file_path_id,
range: range.clone(),
})
.to_bytes(),
)
.await
.unwrap();
let block_size = BlockSize::from_stream(&mut stream).await.unwrap();
let size = stream.read_u64_le().await.unwrap();
Transfer::new(
&SpaceblockRequests {
id,
block_size,
requests: vec![SpaceblockRequest {
// TODO: Removing need for this field in this case
name: "todo".to_string(),
// TODO: Maybe removing need for `size` from this side
size,
range,
}],
},
|percent| {
debug!(
"P2P receiving file path '{}' - progress {}%",
file_path_id, percent
);
},
&Arc::new(AtomicBool::new(false)),
)
.receive(&mut stream, output)
.await;
}
pub(crate) async fn reciever(
node: &Arc<Node>,
HeaderFile {
id,
library_id,
file_path_id,
range,
}: HeaderFile,
event: PeerMessageEvent<UnicastStream>,
) {
let mut stream = event.stream;
if !node.files_over_p2p_flag.load(Ordering::Relaxed) {
panic!("Files over P2P is disabled!");
}
// TODO: Tunnel and authentication
// TODO: Use BufReader
let library = node.libraries.get_library(&library_id).await.unwrap();
let file_path = library
.db
.file_path()
.find_unique(file_path::pub_id::equals(file_path_id.as_bytes().to_vec()))
.select(file_path_to_handle_p2p_serve_file::select())
.exec()
.await
.unwrap()
.unwrap();
let location = file_path.location.as_ref().unwrap();
let location_path = location.path.as_ref().unwrap();
let path = Path::new(location_path)
.join(IsolatedFilePathData::try_from((location.id, &file_path)).unwrap());
debug!("Serving path '{:?}' over P2P", path);
let file = File::open(&path).await.unwrap();
let metadata = file.metadata().await.unwrap();
let block_size = BlockSize::from_size(metadata.len());
stream.write_all(&block_size.to_bytes()).await.unwrap();
stream
.write_all(&metadata.len().to_le_bytes())
.await
.unwrap();
let file = BufReader::new(file);
Transfer::new(
&SpaceblockRequests {
id,
block_size,
requests: vec![SpaceblockRequest {
// TODO: Removing need for this field in this case
name: "todo".to_string(),
size: metadata.len(),
range,
}],
},
|percent| {
debug!(
"P2P loading file path '{}' - progress {}%",
file_path_id, percent
);
},
&Arc::new(AtomicBool::new(false)),
)
.send(&mut stream, file)
.await;
}
// TODO: Unit tests

core/src/p2p/operations/spacedrop.rs (new file)

@ -0,0 +1,272 @@
use std::{
borrow::Cow,
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::Duration,
};
use futures::future::join_all;
use sd_p2p::{
spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer},
spacetime::UnicastStream,
spacetunnel::RemoteIdentity,
PeerMessageEvent,
};
use tokio::{
fs::{create_dir_all, File},
io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter},
sync::oneshot,
time::{sleep, Instant},
};
use tracing::{debug, info};
use uuid::Uuid;
use crate::p2p::{Header, P2PEvent, P2PManager};
/// The amount of time to wait for a Spacedrop request to be accepted or rejected before it's automatically rejected
pub(crate) const SPACEDROP_TIMEOUT: Duration = Duration::from_secs(60);
// TODO: Proper error handling
pub async fn spacedrop(
p2p: Arc<P2PManager>,
identity: RemoteIdentity,
paths: Vec<PathBuf>,
) -> Result<Uuid, ()> {
if paths.is_empty() {
return Err(());
}
let (files, requests): (Vec<_>, Vec<_>) = join_all(paths.into_iter().map(|path| async move {
let file = File::open(&path).await?;
let metadata = file.metadata().await?;
let name = path
.file_name()
.map(|v| v.to_string_lossy())
.unwrap_or(Cow::Borrowed(""))
.to_string();
Ok((
(path, file),
SpaceblockRequest {
name,
size: metadata.len(),
range: Range::Full,
},
))
}))
.await
.into_iter()
.collect::<Result<Vec<_>, std::io::Error>>()
.map_err(|_| ())? // TODO: Error handling
.into_iter()
.unzip();
let total_length: u64 = requests.iter().map(|req| req.size).sum();
let id = Uuid::new_v4();
debug!("({id}): starting Spacedrop with peer '{identity}");
let mut stream = p2p.manager.stream(identity).await.map_err(|err| {
debug!("({id}): failed to connect: {err:?}");
// TODO: Proper error
})?;
tokio::spawn(async move {
debug!("({id}): connected, sending header");
let header = Header::Spacedrop(SpaceblockRequests {
id,
block_size: BlockSize::from_size(total_length),
requests,
});
if let Err(err) = stream.write_all(&header.to_bytes()).await {
debug!("({id}): failed to send header: {err}");
return;
}
let Header::Spacedrop(requests) = header else {
unreachable!();
};
debug!("({id}): waiting for response");
let result = tokio::select! {
result = stream.read_u8() => result,
// Add 5 seconds in case the user responded right at the deadline over a slow network
_ = sleep(SPACEDROP_TIMEOUT + Duration::from_secs(5)) => {
debug!("({id}): timed out, cancelling");
p2p.events.0.send(P2PEvent::SpacedropTimedout { id }).ok();
return;
},
};
match result {
Ok(0) => {
debug!("({id}): Spacedrop was rejected from peer '{identity}'");
p2p.events.0.send(P2PEvent::SpacedropRejected { id }).ok();
return;
}
Ok(1) => {} // Okay
Ok(_) => todo!(), // TODO: Proper error
Err(_) => todo!(), // TODO: Proper error
}
let cancelled = Arc::new(AtomicBool::new(false));
p2p.spacedrop_cancelations
.lock()
.await
.insert(id, cancelled.clone());
debug!("({id}): starting transfer");
let i = Instant::now();
let mut transfer = Transfer::new(
&requests,
|percent| {
p2p.events
.0
.send(P2PEvent::SpacedropProgress { id, percent })
.ok();
},
&cancelled,
);
for (file_id, (path, file)) in files.into_iter().enumerate() {
debug!("({id}): transmitting '{file_id}' from '{path:?}'");
let file = BufReader::new(file);
transfer.send(&mut stream, file).await;
}
debug!("({id}): finished; took '{:?}", i.elapsed());
});
Ok(id)
}
// TODO: Move these off the manager
impl P2PManager {
pub async fn accept_spacedrop(&self, id: Uuid, path: String) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
chan.send(Some(path)).unwrap(); // TODO: will fail if timed out
}
}
pub async fn reject_spacedrop(&self, id: Uuid) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
chan.send(None).unwrap();
}
}
pub async fn cancel_spacedrop(&self, id: Uuid) {
if let Some(cancelled) = self.spacedrop_cancelations.lock().await.remove(&id) {
cancelled.store(true, Ordering::Relaxed);
}
}
}
pub(crate) async fn reciever(
this: &Arc<P2PManager>,
req: SpaceblockRequests,
event: PeerMessageEvent<UnicastStream>,
) {
let id = req.id;
let mut stream = event.stream;
let (tx, rx) = oneshot::channel();
info!(
"({id}): received '{}' files from peer '{}' with block size '{:?}'",
req.requests.len(),
event.identity,
req.block_size
);
this.spacedrop_pairing_reqs.lock().await.insert(id, tx);
if this
.events
.0
.send(P2PEvent::SpacedropRequest {
id,
identity: event.identity,
peer_name: "Unknown".into(),
// TODO: A better solution to this
// manager
// .get_discovered_peers()
// .await
// .into_iter()
// .find(|p| p.peer_id == event.peer_id)
// .map(|p| p.metadata.name)
// .unwrap_or_else(|| "Unknown".to_string()),
files: req
.requests
.iter()
.map(|req| req.name.clone())
.collect::<Vec<_>>(),
})
.is_err()
{
// No frontends are active
todo!("Outright reject Spacedrop");
}
tokio::select! {
_ = sleep(SPACEDROP_TIMEOUT) => {
info!("({id}): timeout, rejecting!");
stream.write_all(&[0]).await.unwrap();
stream.flush().await.unwrap();
}
file_path = rx => {
match file_path {
Ok(Some(file_path)) => {
info!("({id}): accepted saving to '{:?}'", file_path);
let cancelled = Arc::new(AtomicBool::new(false));
this.spacedrop_cancelations
.lock()
.await
.insert(id, cancelled.clone());
stream.write_all(&[1]).await.unwrap();
let names = req.requests.iter().map(|req| req.name.clone()).collect::<Vec<_>>();
let mut transfer = Transfer::new(&req, |percent| {
this.events.0.send(P2PEvent::SpacedropProgress { id, percent }).ok();
}, &cancelled);
let file_path = PathBuf::from(file_path);
let names_len = names.len();
for file_name in names {
// When transferring more than 1 file we want to join the incoming file name to the directory provided by the user
let mut path = file_path.clone();
if names_len != 1 {
// We know the `file_path` will be a directory so we can just push the file name to it
path.push(&file_name);
}
debug!("({id}): accepting '{file_name}' and saving to '{:?}'", path);
if let Some(parent) = path.parent() {
create_dir_all(parent).await.unwrap();
}
let f = File::create(path).await.unwrap();
let f = BufWriter::new(f);
transfer.receive(&mut stream, f).await;
}
info!("({id}): complete");
}
Ok(None) => {
info!("({id}): rejected");
stream.write_all(&[0]).await.unwrap();
stream.flush().await.unwrap();
}
Err(_) => {
info!("({id}): error with Spacedrop pairing request receiver!");
}
}
}
};
}
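End to end, the sender path is: `spacedrop()` returns the transfer id immediately, then progress and rejection arrive as `P2PEvent`s on the broadcast channel. A condensed sketch (`send_and_watch` is hypothetical; the enum has no dedicated completion event, so 100% progress is treated as done here):

```rust
// Sketch: start a Spacedrop and follow it on the event bus.
async fn send_and_watch(
    p2p: std::sync::Arc<P2PManager>,
    identity: sd_p2p::spacetunnel::RemoteIdentity,
    paths: Vec<std::path::PathBuf>,
) -> Result<(), ()> {
    let mut events = p2p.subscribe(); // broadcast::Receiver<P2PEvent>
    let id = spacedrop(p2p, identity, paths).await?;

    while let Ok(event) = events.recv().await {
        match event {
            P2PEvent::SpacedropProgress { id: eid, percent } if eid == id => {
                tracing::debug!("spacedrop {id}: {percent}%");
                if percent == 100 {
                    return Ok(());
                }
            }
            P2PEvent::SpacedropRejected { id: eid } | P2PEvent::SpacedropTimedout { id: eid }
                if eid == id =>
            {
                return Err(());
            }
            _ => {}
        }
    }
    Ok(())
}
```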

core/src/p2p/p2p_events.rs (new file)

@ -0,0 +1,52 @@
use sd_p2p::spacetunnel::RemoteIdentity;
use serde::Serialize;
use specta::Type;
use uuid::Uuid;
use super::{OperatingSystem, PairingStatus, PeerMetadata};
/// TODO: P2P event for the frontend
#[derive(Debug, Clone, Serialize, Type)]
#[serde(tag = "type")]
pub enum P2PEvent {
DiscoveredPeer {
identity: RemoteIdentity,
metadata: PeerMetadata,
},
ExpiredPeer {
identity: RemoteIdentity,
},
ConnectedPeer {
identity: RemoteIdentity,
},
DisconnectedPeer {
identity: RemoteIdentity,
},
SpacedropRequest {
id: Uuid,
identity: RemoteIdentity,
peer_name: String,
files: Vec<String>,
},
SpacedropProgress {
id: Uuid,
percent: u8,
},
SpacedropTimedout {
id: Uuid,
},
SpacedropRejected {
id: Uuid,
},
// A pairing request has come in.
// This will fire on the responder only.
PairingRequest {
id: u16,
name: String,
os: OperatingSystem,
},
PairingProgress {
id: u16,
status: PairingStatus,
}, // TODO: Expire peer + connection/disconnect
}
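Because the enum is `#[serde(tag = "type")]`, each event reaches the frontend as an internally tagged object. A sketch of the wire shape (assumes `serde_json`; values illustrative):

```rust
// Sketch: serde's internally tagged representation of these events.
fn wire_shape_example() {
    let json = serde_json::to_string(&P2PEvent::SpacedropProgress {
        id: uuid::Uuid::new_v4(),
        percent: 42,
    })
    .expect("serialize");
    // e.g. {"type":"SpacedropProgress","id":"<uuid>","percent":42}
    assert!(json.contains(r#""type":"SpacedropProgress""#));
}
```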

core/src/p2p/p2p_manager.rs

@ -1,100 +1,35 @@
use std::{
borrow::Cow,
collections::HashMap,
path::{Path, PathBuf},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::{Duration, Instant},
collections::{HashMap, HashSet},
net::SocketAddr,
sync::{atomic::AtomicBool, Arc},
};
use futures::future::join_all;
use sd_p2p::{
spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer},
spacetunnel::{RemoteIdentity, Tunnel},
Event, Manager, ManagerError, ManagerStream, MetadataManager, PeerId,
spacetunnel::RemoteIdentity, Manager, ManagerConfig, ManagerError, PeerStatus, Service,
};
use sd_prisma::prisma::file_path;
use serde::Serialize;
use specta::Type;
use tokio::{
fs::{create_dir_all, File},
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader, BufWriter},
sync::{broadcast, oneshot, Mutex},
time::sleep,
};
use tracing::{debug, error, info};
use tokio::sync::{broadcast, mpsc, oneshot, Mutex};
use tracing::info;
use uuid::Uuid;
use crate::{
library::Library,
location::file_path_helper::{file_path_to_handle_p2p_serve_file, IsolatedFilePathData},
node::config::{self, NodeConfig},
node::config,
p2p::{OperatingSystem, SPACEDRIVE_APP_ID},
Node,
};
use super::{
sync::{InstanceState, NetworkedLibraries, SyncMessage},
Header, PairingManager, PairingStatus, PeerMetadata,
LibraryMetadata, LibraryServices, P2PEvent, P2PManagerActor, PairingManager, PeerMetadata,
};
/// The amount of time to wait for a Spacedrop request to be accepted or rejected before it's automatically rejected
const SPACEDROP_TIMEOUT: Duration = Duration::from_secs(60);
/// TODO: P2P event for the frontend
#[derive(Debug, Clone, Type, Serialize)]
#[serde(tag = "type")]
pub enum P2PEvent {
DiscoveredPeer {
peer_id: PeerId,
metadata: PeerMetadata,
},
ExpiredPeer {
peer_id: PeerId,
},
ConnectedPeer {
peer_id: PeerId,
},
DisconnectedPeer {
peer_id: PeerId,
},
SpacedropRequest {
id: Uuid,
peer_id: PeerId,
peer_name: String,
files: Vec<String>,
},
SpacedropProgress {
id: Uuid,
percent: u8,
},
SpacedropTimedout {
id: Uuid,
},
SpacedropRejected {
id: Uuid,
},
// A pairing request has come in.
// This will fire on the responder only.
PairingRequest {
id: u16,
name: String,
os: OperatingSystem,
},
PairingProgress {
id: u16,
status: PairingStatus,
}, // TODO: Expire peer + connection/disconnect
}
pub struct P2PManager {
pub(crate) node: Service<PeerMetadata>,
pub(crate) libraries: LibraryServices,
pub events: (broadcast::Sender<P2PEvent>, broadcast::Receiver<P2PEvent>),
pub manager: Arc<Manager<PeerMetadata>>,
spacedrop_pairing_reqs: Arc<Mutex<HashMap<Uuid, oneshot::Sender<Option<String>>>>>,
spacedrop_cancelations: Arc<Mutex<HashMap<Uuid, Arc<AtomicBool>>>>,
pub metadata_manager: Arc<MetadataManager<PeerMetadata>>,
pub manager: Arc<Manager>,
pub(super) spacedrop_pairing_reqs: Arc<Mutex<HashMap<Uuid, oneshot::Sender<Option<String>>>>>,
pub(super) spacedrop_cancelations: Arc<Mutex<HashMap<Uuid, Arc<AtomicBool>>>>,
pub pairing: Arc<PairingManager>,
node_config_manager: Arc<config::Manager>,
}
@ -102,661 +37,139 @@ pub struct P2PManager {
impl P2PManager {
pub async fn new(
node_config: Arc<config::Manager>,
) -> Result<(Arc<P2PManager>, ManagerStream<PeerMetadata>), ManagerError> {
let (config, keypair, manager_config) = {
libraries: Arc<crate::library::Libraries>,
) -> Result<(Arc<P2PManager>, P2PManagerActor), ManagerError> {
let (keypair, manager_config) = {
let config = node_config.get().await;
// TODO: The `vec![]` here is problematic but will be fixed with delayed `MetadataManager`
(
Self::config_to_metadata(&config, vec![]),
config.keypair,
config.p2p.clone(),
)
(config.keypair, config.p2p.clone())
};
// TODO: Delay building this until the libraries are loaded
let metadata_manager = MetadataManager::new(config);
let (manager, stream) = sd_p2p::Manager::<PeerMetadata>::new(
SPACEDRIVE_APP_ID,
&keypair,
manager_config,
metadata_manager.clone(),
)
.await?;
let (manager, stream) =
sd_p2p::Manager::new(SPACEDRIVE_APP_ID, &keypair, manager_config).await?;
info!(
"Node '{}' is now online listening at addresses: {:?}",
manager.peer_id(),
manager.listen_addrs().await
"Node RemoteIdentity('{}') libp2p::PeerId('{}') is now online listening at addresses: {:?}",
manager.identity(),
manager.libp2p_peer_id(),
stream.listen_addrs()
);
// need to keep 'rx' around so that the channel isn't dropped
let (tx, rx) = broadcast::channel(100);
let pairing = PairingManager::new(manager.clone(), tx.clone(), metadata_manager.clone());
let pairing = PairingManager::new(manager.clone(), tx.clone());
let (register_service_tx, register_service_rx) = mpsc::channel(10);
let this = Arc::new(Self {
node: Service::new("node", manager.clone()).unwrap(),
libraries: LibraryServices::new(register_service_tx),
pairing,
events: (tx, rx),
manager,
spacedrop_pairing_reqs: Default::default(),
spacedrop_cancelations: Default::default(),
node_config_manager: node_config,
});
this.update_metadata().await;
tokio::spawn(LibraryServices::start(this.clone(), libraries));
Ok((
Arc::new(Self {
pairing,
events: (tx, rx),
manager,
spacedrop_pairing_reqs: Default::default(),
spacedrop_cancelations: Default::default(),
metadata_manager,
node_config_manager: node_config,
}),
stream,
this.clone(),
P2PManagerActor {
manager: this,
stream,
register_service_rx,
},
))
}
pub fn start(&self, mut stream: ManagerStream<PeerMetadata>, node: Arc<Node>) {
tokio::spawn({
let manager = self.manager.clone();
let metadata_manager = self.metadata_manager.clone();
let events = self.events.0.clone();
let spacedrop_pairing_reqs = self.spacedrop_pairing_reqs.clone();
let spacedrop_cancelations = self.spacedrop_cancelations.clone();
pub fn get_library_service(&self, library_id: &Uuid) -> Option<Arc<Service<LibraryMetadata>>> {
self.libraries.get(library_id)
}
let pairing = self.pairing.clone();
async move {
let mut shutdown = false;
while let Some(event) = stream.next().await {
match event {
Event::PeerDiscovered(event) => {
events
.send(P2PEvent::DiscoveredPeer {
peer_id: event.peer_id,
metadata: event.metadata.clone(),
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
node.nlm.peer_discovered(event).await;
}
Event::PeerExpired { id, .. } => {
events
.send(P2PEvent::ExpiredPeer { peer_id: id })
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
node.nlm.peer_expired(id).await;
}
Event::PeerConnected(event) => {
events
.send(P2PEvent::ConnectedPeer {
peer_id: event.peer_id,
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
node.nlm.peer_connected(event.peer_id).await;
let manager = manager.clone();
let nlm = node.nlm.clone();
let instances = metadata_manager.get().instances;
let node = node.clone();
tokio::spawn(async move {
if event.establisher {
let mut stream = manager.stream(event.peer_id).await.unwrap();
Self::resync(
nlm.clone(),
&mut stream,
event.peer_id,
instances,
)
.await;
drop(stream);
}
Self::resync_part2(nlm, node, &event.peer_id).await;
});
}
Event::PeerDisconnected(peer_id) => {
events
.send(P2PEvent::DisconnectedPeer { peer_id })
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
node.nlm.peer_disconnected(peer_id).await;
}
Event::PeerMessage(event) => {
let events = events.clone();
let metadata_manager = metadata_manager.clone();
let spacedrop_pairing_reqs = spacedrop_pairing_reqs.clone();
let pairing = pairing.clone();
let spacedrop_cancelations = spacedrop_cancelations.clone();
let node = node.clone();
let manager = manager.clone();
tokio::spawn(async move {
let mut stream = event.stream;
let header = Header::from_stream(&mut stream).await.unwrap();
match header {
Header::Ping => {
debug!("Received ping from peer '{}'", event.peer_id);
}
Header::Spacedrop(req) => {
let id = req.id;
let (tx, rx) = oneshot::channel();
info!(
"({id}): received '{}' files from peer '{}' with block size '{:?}'",
req.requests.len(), event.peer_id, req.block_size
);
spacedrop_pairing_reqs.lock().await.insert(id, tx);
if events
.send(P2PEvent::SpacedropRequest {
id,
peer_id: event.peer_id,
peer_name: manager
.get_discovered_peers()
.await
.into_iter()
.find(|p| p.peer_id == event.peer_id)
.map(|p| p.metadata.name)
.unwrap_or_else(|| "Unknown".to_string()),
files: req
.requests
.iter()
.map(|req| req.name.clone())
.collect::<Vec<_>>(),
})
.is_err()
{
// No frontends are active
todo!("Outright reject Spacedrop");
}
tokio::select! {
_ = sleep(SPACEDROP_TIMEOUT) => {
info!("({id}): timeout, rejecting!");
stream.write_all(&[0]).await.unwrap();
stream.flush().await.unwrap();
}
file_path = rx => {
match file_path {
Ok(Some(file_path)) => {
info!("({id}): accepted saving to '{:?}'", file_path);
let cancelled = Arc::new(AtomicBool::new(false));
spacedrop_cancelations
.lock()
.await
.insert(id, cancelled.clone());
stream.write_all(&[1]).await.unwrap();
let names = req.requests.iter().map(|req| req.name.clone()).collect::<Vec<_>>();
let mut transfer = Transfer::new(&req, |percent| {
events.send(P2PEvent::SpacedropProgress { id, percent }).ok();
}, &cancelled);
let file_path = PathBuf::from(file_path);
let names_len = names.len();
for file_name in names {
// When transferring more than 1 file we want to join the incoming file name to the directory provided by the user
let mut path = file_path.clone();
if names_len != 1 {
// We know the `file_path` will be a directory so we can just push the file name to it
path.push(&file_name);
}
debug!("({id}): accepting '{file_name}' and saving to '{:?}'", path);
if let Some(parent) = path.parent() {
create_dir_all(parent).await.unwrap();
}
let f = File::create(path).await.unwrap();
let f = BufWriter::new(f);
transfer.receive(&mut stream, f).await;
}
info!("({id}): complete");
}
Ok(None) => {
info!("({id}): rejected");
stream.write_all(&[0]).await.unwrap();
stream.flush().await.unwrap();
}
Err(_) => {
info!("({id}): error with Spacedrop pairing request receiver!");
}
}
}
};
}
Header::Pair => {
pairing
.responder(
event.peer_id,
stream,
&node.libraries,
node.clone(),
)
.await;
}
Header::Sync(library_id) => {
let mut tunnel = Tunnel::responder(stream).await.unwrap();
let msg =
SyncMessage::from_stream(&mut tunnel).await.unwrap();
let library =
node.libraries.get_library(&library_id).await.unwrap();
match msg {
SyncMessage::NewOperations => {
super::sync::responder(&mut tunnel, library).await;
}
};
}
Header::File {
id,
library_id,
file_path_id,
range,
} => {
if !node.files_over_p2p_flag.load(Ordering::Relaxed) {
panic!("Files over P2P is disabled!");
}
// TODO: Tunnel and authentication
// TODO: Use BufReader
let library =
node.libraries.get_library(&library_id).await.unwrap();
let file_path = library
.db
.file_path()
.find_unique(file_path::pub_id::equals(
file_path_id.as_bytes().to_vec(),
))
.select(file_path_to_handle_p2p_serve_file::select())
.exec()
.await
.unwrap()
.unwrap();
let location = file_path.location.as_ref().unwrap();
let location_path = location.path.as_ref().unwrap();
let path = Path::new(location_path).join(
IsolatedFilePathData::try_from((
location.id,
&file_path,
))
.unwrap(),
);
debug!("Serving path '{:?}' over P2P", path);
let file = File::open(&path).await.unwrap();
let metadata = file.metadata().await.unwrap();
let block_size = BlockSize::from_size(metadata.len());
stream.write_all(&block_size.to_bytes()).await.unwrap();
stream
.write_all(&metadata.len().to_le_bytes())
.await
.unwrap();
let file = BufReader::new(file);
Transfer::new(
&SpaceblockRequests {
id,
block_size,
requests: vec![SpaceblockRequest {
// TODO: Removing need for this field in this case
name: "todo".to_string(),
size: metadata.len(),
range,
}],
},
|percent| {
debug!(
"P2P loading file path '{}' - progress {}%",
file_path_id, percent
);
},
&Arc::new(AtomicBool::new(false)),
)
.send(&mut stream, file)
.await;
}
Header::Connected(identities) => {
Self::resync_handler(
&node.nlm,
&mut stream,
event.peer_id,
metadata_manager.get().instances,
identities,
)
.await;
}
}
});
}
Event::PeerBroadcast(_event) => {
// todo!();
}
Event::Shutdown => {
shutdown = true;
break;
}
_ => {}
}
}
if !shutdown {
error!(
"Manager event stream closed! The core is unstable from this point forward!"
);
}
pub async fn update_metadata(&self) {
self.node.update({
let config = self.node_config_manager.get().await;
PeerMetadata {
name: config.name.clone(),
operating_system: Some(OperatingSystem::get_os()),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
}
});
}
fn config_to_metadata(config: &NodeConfig, instances: Vec<RemoteIdentity>) -> PeerMetadata {
PeerMetadata {
name: config.name.clone(),
operating_system: Some(OperatingSystem::get_os()),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
// TODO: Source these via Spacedrive account
// TODO: Maybe anonymise them like Apple do
email: None,
img_url: None,
instances,
}
}
// TODO: Remove this & move to `NetworkedLibraryManager`??? or make it private?
pub async fn update_metadata(&self, instances: Vec<RemoteIdentity>) {
self.metadata_manager.update(Self::config_to_metadata(
&self.node_config_manager.get().await,
instances,
));
}
pub async fn resync(
nlm: Arc<NetworkedLibraries>,
stream: &mut (impl AsyncRead + AsyncWrite + Unpin),
peer_id: PeerId,
instances: Vec<RemoteIdentity>,
) {
// TODO: Make this encrypted using node to node auth so it can't be messed with in transport
stream
.write_all(&Header::Connected(instances).to_bytes())
.await
.unwrap();
let Header::Connected(identities) = Header::from_stream(stream).await.unwrap() else {
panic!("unreachable but error handling")
};
for identity in identities {
nlm.peer_connected2(identity, peer_id).await;
}
}
pub async fn resync_handler(
nlm: &NetworkedLibraries,
stream: &mut (impl AsyncRead + AsyncWrite + Unpin),
peer_id: PeerId,
local_identities: Vec<RemoteIdentity>,
remote_identities: Vec<RemoteIdentity>,
) {
for identity in remote_identities {
nlm.peer_connected2(identity, peer_id).await;
}
stream
.write_all(&Header::Connected(local_identities).to_bytes())
.await
.unwrap();
}
// TODO: Using tunnel for security - Right now all sync events here are unencrypted
pub async fn resync_part2(
nlm: Arc<NetworkedLibraries>,
node: Arc<Node>,
connected_with_peer_id: &PeerId,
) {
for (library_id, data) in nlm.state().await {
let mut library = None;
for (_, data) in data.instances {
let InstanceState::Connected(instance_peer_id) = data else {
continue;
};
if instance_peer_id != *connected_with_peer_id {
continue;
};
let library = match library.clone() {
Some(library) => library,
None => match node.libraries.get_library(&library_id).await {
Some(new_library) => {
library = Some(new_library.clone());
new_library
}
None => continue,
},
};
// Remember, originator creates a new stream internally so the handler for this doesn't have to do anything.
super::sync::originator(library_id, &library.sync, &node.nlm, &node.p2p).await;
}
}
}
pub async fn accept_spacedrop(&self, id: Uuid, path: String) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
chan.send(Some(path)).unwrap(); // TODO: will fail if timed out
}
}
pub async fn reject_spacedrop(&self, id: Uuid) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
chan.send(None).unwrap();
}
}
pub async fn cancel_spacedrop(&self, id: Uuid) {
if let Some(cancelled) = self.spacedrop_cancelations.lock().await.remove(&id) {
cancelled.store(true, Ordering::Relaxed);
}
}
pub fn subscribe(&self) -> broadcast::Receiver<P2PEvent> {
self.events.0.subscribe()
}
pub async fn ping(&self) {
self.manager.broadcast(Header::Ping.to_bytes()).await;
}
// TODO: Replace this with a better system that is more built into `sd-p2p` crate
pub fn state(&self) -> P2PState {
let (
self_peer_id,
self_identity,
config,
manager_connected,
manager_connections,
dicovery_services,
discovery_discovered,
discovery_known,
) = self.manager.get_debug_state();
// TODO: Proper error handling
pub async fn spacedrop(
self: Arc<Self>,
peer_id: PeerId,
paths: Vec<PathBuf>,
) -> Result<Uuid, ()> {
if paths.is_empty() {
return Err(());
P2PState {
node: self.node.get_state(),
libraries: self
.libraries
.libraries()
.into_iter()
.map(|(id, lib)| (id, lib.get_state()))
.collect(),
self_peer_id: PeerId(self_peer_id),
self_identity,
config,
manager_connected: manager_connected
.into_iter()
.map(|(k, v)| (PeerId(k), v))
.collect(),
manager_connections: manager_connections.into_iter().map(PeerId).collect(),
dicovery_services,
discovery_discovered: discovery_discovered
.into_iter()
.map(|(k, v)| {
(
k,
v.into_iter()
.map(|(k, (k1, v, b))| (k, (PeerId(k1), v, b)))
.collect(),
)
})
.collect(),
discovery_known,
}
let (files, requests): (Vec<_>, Vec<_>) =
join_all(paths.into_iter().map(|path| async move {
let file = File::open(&path).await?;
let metadata = file.metadata().await?;
let name = path
.file_name()
.map(|v| v.to_string_lossy())
.unwrap_or(Cow::Borrowed(""))
.to_string();
Ok((
(path, file),
SpaceblockRequest {
name,
size: metadata.len(),
range: Range::Full,
},
))
}))
.await
.into_iter()
.collect::<Result<Vec<_>, std::io::Error>>()
.map_err(|_| ())? // TODO: Error handling
.into_iter()
.unzip();
let total_length: u64 = requests.iter().map(|req| req.size).sum();
let id = Uuid::new_v4();
debug!("({id}): starting Spacedrop with peer '{peer_id}");
let mut stream = self.manager.stream(peer_id).await.map_err(|err| {
debug!("({id}): failed to connect: {err:?}");
// TODO: Proper error
})?;
tokio::spawn(async move {
debug!("({id}): connected, sending header");
let header = Header::Spacedrop(SpaceblockRequests {
id,
block_size: BlockSize::from_size(total_length),
requests,
});
if let Err(err) = stream.write_all(&header.to_bytes()).await {
debug!("({id}): failed to send header: {err}");
return;
}
let Header::Spacedrop(requests) = header else {
unreachable!();
};
debug!("({id}): waiting for response");
let result = tokio::select! {
result = stream.read_u8() => result,
// Add 5 seconds in case the user responded right at the deadline over a slow network
_ = sleep(SPACEDROP_TIMEOUT + Duration::from_secs(5)) => {
debug!("({id}): timed out, cancelling");
self.events.0.send(P2PEvent::SpacedropTimedout { id }).ok();
return;
},
};
match result {
Ok(0) => {
debug!("({id}): Spacedrop was rejected from peer '{peer_id}'");
self.events.0.send(P2PEvent::SpacedropRejected { id }).ok();
return;
}
Ok(1) => {} // Okay
Ok(_) => todo!(), // TODO: Proper error
Err(_) => todo!(), // TODO: Proper error
}
let cancelled = Arc::new(AtomicBool::new(false));
self.spacedrop_cancelations
.lock()
.await
.insert(id, cancelled.clone());
debug!("({id}): starting transfer");
let i = Instant::now();
let mut transfer = Transfer::new(
&requests,
|percent| {
self.events
.0
.send(P2PEvent::SpacedropProgress { id, percent })
.ok();
},
&cancelled,
);
for (file_id, (path, file)) in files.into_iter().enumerate() {
debug!("({id}): transmitting '{file_id}' from '{path:?}'");
let file = BufReader::new(file);
transfer.send(&mut stream, file).await;
}
debug!("({id}): finished; took '{:?}", i.elapsed());
});
Ok(id)
}
// DO NOT USE THIS WITHOUT `node.files_over_p2p_flag == true`
// TODO: Error handling
pub async fn request_file(
&self,
peer_id: PeerId,
library: &Library,
file_path_id: Uuid,
range: Range,
output: impl AsyncWrite + Unpin,
) {
let id = Uuid::new_v4();
let mut stream = self.manager.stream(peer_id).await.unwrap(); // TODO: handle providing incorrect peer id
// TODO: Tunnel for encryption + authentication
stream
.write_all(
&Header::File {
id,
library_id: library.id,
file_path_id,
range: range.clone(),
}
.to_bytes(),
)
.await
.unwrap();
let block_size = BlockSize::from_stream(&mut stream).await.unwrap();
let size = stream.read_u64_le().await.unwrap();
Transfer::new(
&SpaceblockRequests {
id,
block_size,
requests: vec![SpaceblockRequest {
// TODO: Removing need for this field in this case
name: "todo".to_string(),
// TODO: Maybe removing need for `size` from this side
size,
range,
}],
},
|percent| {
debug!(
"P2P receiving file path '{}' - progress {}%",
file_path_id, percent
);
},
&Arc::new(AtomicBool::new(false)),
)
.receive(&mut stream, output)
.await;
}
pub async fn shutdown(&self) {
self.manager.shutdown().await;
}
}
#[derive(Debug, Serialize, Type)]
#[allow(clippy::type_complexity)]
pub struct P2PState {
node: HashMap<RemoteIdentity, PeerStatus>,
libraries: Vec<(Uuid, HashMap<RemoteIdentity, PeerStatus>)>,
self_peer_id: PeerId,
self_identity: RemoteIdentity,
config: ManagerConfig,
manager_connected: HashMap<PeerId, RemoteIdentity>,
manager_connections: HashSet<PeerId>,
dicovery_services: HashMap<String, Option<HashMap<String, String>>>,
discovery_discovered: HashMap<
String,
HashMap<RemoteIdentity, (PeerId, HashMap<String, String>, Vec<SocketAddr>)>,
>,
discovery_known: HashMap<String, HashSet<RemoteIdentity>>,
}
// TODO: Get this back into `sd-p2p` but keep it private
#[derive(Debug, Serialize, Type, Hash, Eq, PartialEq, Ord, PartialOrd, Clone)]
pub struct PeerId(#[specta(type = String)] sd_p2p::internal::PeerId);

core/src/p2p/p2p_manager_actor.rs (new file)

@ -0,0 +1,130 @@
use std::sync::Arc;
use futures::StreamExt;
use sd_p2p::{spacetunnel::Tunnel, Event, ManagerStream, Service, ServiceEvent};
use tokio::sync::mpsc;
use tracing::error;
use crate::Node;
use super::{operations, sync::SyncMessage, Header, LibraryMetadata, P2PEvent, P2PManager};
pub struct P2PManagerActor {
pub(super) manager: Arc<P2PManager>,
pub(super) stream: ManagerStream,
pub(super) register_service_rx: mpsc::Receiver<Arc<Service<LibraryMetadata>>>,
}
impl P2PManagerActor {
pub fn start(self, node: Arc<Node>) {
let Self {
manager: this,
mut stream,
mut register_service_rx,
} = self;
tokio::spawn({
async move {
let mut node_rx = this.node.listen();
loop {
tokio::select! {
// TODO: We ignore the response of this but I suspect it will be useful in the future so it stays for now.
Some(_event) = register_service_rx.recv() => {},
// TODO: We should subscribe to library-level events too but frontend isn't cut out for them right now.
Some(Ok(event)) = node_rx.next() => {
this.events.0
.send(match event {
ServiceEvent::Discovered { identity, metadata } =>
P2PEvent::DiscoveredPeer {
identity,
metadata,
},
ServiceEvent::Expired { identity } =>
P2PEvent::ExpiredPeer {
identity,
},
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Some(event) = stream.next() => {
match event {
Event::PeerConnected(event) => {
this.events
.0
.send(P2PEvent::ConnectedPeer {
identity: event.identity,
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Event::PeerDisconnected(identity) => {
this.events
.0
.send(P2PEvent::DisconnectedPeer { identity })
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Event::PeerMessage(mut event) => {
let this = this.clone();
let node = node.clone();
tokio::spawn(async move {
let header = Header::from_stream(&mut event.stream).await.unwrap();
match header {
Header::Ping => operations::ping::reciever(event).await,
Header::Spacedrop(req) => {
operations::spacedrop::reciever(&this, req, event).await
}
Header::Pair => {
this.pairing
.clone()
.responder(
event.identity,
event.stream,
&node.libraries,
node.clone(),
)
.await;
}
Header::Sync(library_id) => {
let mut tunnel =
Tunnel::responder(event.stream).await.unwrap();
let msg =
SyncMessage::from_stream(&mut tunnel).await.unwrap();
let library =
node.libraries.get_library(&library_id).await.unwrap();
match msg {
SyncMessage::NewOperations => {
super::sync::responder(&mut tunnel, library).await;
}
};
}
Header::File(req) => {
operations::request_file::reciever(&node, req, event).await
}
}
});
}
Event::PeerBroadcast(_event) => {
panic!("Broadcast's are cringe");
}
Event::Shutdown => break,
_ => {}
}
}
}
}
error!(
"Manager event stream closed! The core is unstable from this point forward!"
);
}
});
}
}
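This actor is the other half of the `P2PManager::new` split visible in the `Node::new` hunk earlier: construction returns `(manager, actor)`, and the actor's event loop is only spawned once the `Node` exists. A boot-order sketch (`build_node` is a hypothetical stand-in for the rest of `Node::new`):

```rust
// Sketch of the boot order (cf. the Node::new hunk earlier in this diff).
async fn boot(
    config: std::sync::Arc<crate::node::config::Manager>,
    libraries: std::sync::Arc<crate::library::Libraries>,
) -> Result<(), sd_p2p::ManagerError> {
    let (p2p, p2p_actor) = P2PManager::new(config, libraries).await?;
    let node = build_node(p2p); // hypothetical: the rest of Node::new
    p2p_actor.start(node); // spawns the select! event loop above
    Ok(())
}

fn build_node(_p2p: std::sync::Arc<P2PManager>) -> std::sync::Arc<crate::Node> {
    unimplemented!("stand-in for constructing the Arc<Node> that holds `p2p`")
}
```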

core/src/p2p/pairing/mod.rs

@ -10,7 +10,10 @@ use std::{
use chrono::Utc;
use futures::channel::oneshot;
use sd_p2p::{spacetunnel::Identity, Manager, MetadataManager, PeerId};
use sd_p2p::{
spacetunnel::{Identity, RemoteIdentity},
Manager, Metadata,
};
use sd_prisma::prisma::instance;
use serde::{Deserialize, Serialize};
@ -39,22 +42,16 @@ pub struct PairingManager {
id: AtomicU16,
events_tx: broadcast::Sender<P2PEvent>,
pairing_response: RwLock<HashMap<u16, oneshot::Sender<PairingDecision>>>,
manager: Arc<Manager<PeerMetadata>>,
metadata_manager: Arc<MetadataManager<PeerMetadata>>,
manager: Arc<Manager>,
}
impl PairingManager {
pub fn new(
manager: Arc<Manager<PeerMetadata>>,
events_tx: broadcast::Sender<P2PEvent>,
metadata_manager: Arc<MetadataManager<PeerMetadata>>,
) -> Arc<Self> {
pub fn new(manager: Arc<Manager>, events_tx: broadcast::Sender<P2PEvent>) -> Arc<Self> {
Arc::new(Self {
id: AtomicU16::new(0),
events_tx,
pairing_response: RwLock::new(HashMap::new()),
manager,
metadata_manager,
})
}
@ -72,16 +69,16 @@ impl PairingManager {
// TODO: Error handling
pub async fn originator(self: Arc<Self>, peer_id: PeerId, node: Arc<Node>) -> u16 {
pub async fn originator(self: Arc<Self>, identity: RemoteIdentity, node: Arc<Node>) -> u16 {
// TODO: Timeout for max number of pairings in a time period
let pairing_id = self.id.fetch_add(1, Ordering::SeqCst);
self.emit_progress(pairing_id, PairingStatus::EstablishingConnection);
info!("Beginning pairing '{pairing_id}' as originator to remote peer '{peer_id}'");
info!("Beginning pairing '{pairing_id}' as originator to remote peer '{identity}'");
tokio::spawn(async move {
let mut stream = self.manager.stream(peer_id).await.unwrap();
let mut stream = self.manager.stream(identity).await.unwrap();
stream.write_all(&Header::Pair.to_bytes()).await.unwrap();
// TODO: Ensure both clients are on a compatible version cause Prisma model changes will cause issues
@ -205,20 +202,12 @@ impl PairingManager {
// Called again so the new instances are picked up
node.libraries.update_instances(library.clone()).await;
P2PManager::resync(
node.nlm.clone(),
&mut stream,
peer_id,
self.metadata_manager.get().instances,
)
.await;
// TODO: Done message to frontend
self.emit_progress(pairing_id, PairingStatus::PairingComplete(library_id));
stream.flush().await.unwrap();
// Remember, originator creates a new stream internally so the handler for this doesn't have to do anything.
super::sync::originator(library_id, &library.sync, &node.nlm, &node.p2p).await;
super::sync::originator(library_id, &library.sync, &node.p2p).await;
}
PairingResponse::Rejected => {
info!("Pairing '{pairing_id}' rejected by remote");
@ -232,7 +221,7 @@ impl PairingManager {
pub async fn responder(
self: Arc<Self>,
peer_id: PeerId,
identity: RemoteIdentity,
mut stream: impl AsyncRead + AsyncWrite + Unpin,
library_manager: &Libraries,
node: Arc<Node>,
@ -240,7 +229,7 @@ impl PairingManager {
let pairing_id = self.id.fetch_add(1, Ordering::SeqCst);
self.emit_progress(pairing_id, PairingStatus::EstablishingConnection);
info!("Beginning pairing '{pairing_id}' as responder to remote peer '{peer_id}'");
info!("Beginning pairing '{pairing_id}' as responder to remote peer '{identity}'");
let remote_instance = PairingRequest::from_stream(&mut stream).await.unwrap().0;
self.emit_progress(pairing_id, PairingStatus::PairingDecisionRequest);
@ -275,8 +264,7 @@ impl PairingManager {
// TODO: Rollback this on pairing failure
instance::Create {
pub_id: remote_instance.id.as_bytes().to_vec(),
identity: IdentityOrRemoteIdentity::RemoteIdentity(remote_instance.identity.clone())
.to_bytes(),
identity: IdentityOrRemoteIdentity::RemoteIdentity(remote_instance.identity).to_bytes(),
node_id: remote_instance.node_id.as_bytes().to_vec(),
node_name: remote_instance.node_name,
node_platform: remote_instance.node_platform as i32,
@ -326,29 +314,11 @@ impl PairingManager {
// TODO: Pairing confirmation + rollback
// Called again so the new instances are picked up
// node.re
// library_manager.node.nlm.load_library(&library).await;
let Header::Connected(remote_identities) = Header::from_stream(&mut stream).await.unwrap()
else {
todo!("unreachable; todo error handling");
};
P2PManager::resync_handler(
&node.nlm,
&mut stream,
peer_id,
self.metadata_manager.get().instances,
remote_identities,
)
.await;
self.emit_progress(pairing_id, PairingStatus::PairingComplete(library_id));
stream.flush().await.unwrap();
// Remember, originator creates a new stream internally so the handler for this doesn't have to do anything.
super::sync::originator(library_id, &library.sync, &node.nlm, &node.p2p).await;
super::sync::originator(library_id, &library.sync, &node.p2p).await;
}
}
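
Taken together, pairing is now keyed on `RemoteIdentity` end to end instead of `PeerId`. A minimal call-site sketch, assuming a `node: Arc<Node>` handle and a `remote: RemoteIdentity` taken from a service's discovered peers (both names are assumptions, not from this diff):

// Hypothetical call site for the originator side of pairing.
let pairing_id = node.p2p.pairing.clone().originator(remote, node.clone()).await;
info!("pairing '{pairing_id}' started");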

View file

@ -104,7 +104,7 @@ impl Instance {
let mut buf = Vec::new();
encode::uuid(&mut buf, id);
encode::buf(&mut buf, &identity.to_bytes());
encode::buf(&mut buf, &identity.get_bytes());
encode::uuid(&mut buf, node_id);
encode::string(&mut buf, node_name);
buf.push(*node_platform as u8);

View file

@ -1,10 +1,8 @@
use std::{collections::HashMap, env, str::FromStr};
use itertools::Itertools;
use sd_p2p::{spacetunnel::RemoteIdentity, Metadata, PeerId};
use sd_p2p::Metadata;
use serde::{Deserialize, Serialize};
use specta::Type;
use tracing::warn;
use crate::node::Platform;
@ -13,11 +11,6 @@ pub struct PeerMetadata {
pub(super) name: String,
pub(super) operating_system: Option<OperatingSystem>,
pub(super) version: Option<String>,
pub(super) email: Option<String>,
pub(super) img_url: Option<String>,
// TODO: Max vec length to prevent it being used to spam??
#[serde(skip)]
pub(super) instances: Vec<RemoteIdentity>,
}
impl Metadata for PeerMetadata {
@ -30,32 +23,10 @@ impl Metadata for PeerMetadata {
if let Some(version) = self.version {
map.insert("version".to_owned(), version);
}
if let Some(email) = self.email {
map.insert("email".to_owned(), email);
}
if let Some(img_url) = self.img_url {
map.insert("img_url".to_owned(), img_url);
}
// This is not pretty but a DNS record has a max of 255 characters so we use multiple records. Be aware the MDNS library adds `i_{i}=` to the start so it counts towards the 255 length.
self.instances
.into_iter()
.map(|i| hex::encode(i.to_bytes()))
.collect::<Vec<_>>()
.join(",")
.chars()
.chunks(249 /* 3 (`i_=`) + 3 (`100`) */)
.into_iter()
.map(|c| c.collect::<String>())
.enumerate()
.for_each(|(i, s)| {
map.insert(format!("i_{}", i), s);
});
map
}
fn from_hashmap(peer_id: &PeerId, data: &HashMap<String, String>) -> Result<Self, String>
fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized,
{
@ -72,42 +43,6 @@ impl Metadata for PeerMetadata {
.map(|os| os.parse().map_err(|_| "Unable to parse 'OperatingSystem'!"))
.transpose()?,
version: data.get("version").map(|v| v.to_owned()),
email: data.get("email").map(|v| v.to_owned()),
img_url: data.get("img_url").map(|v| v.to_owned()),
instances: {
let mut i = 0;
let mut instances = String::new();
while let Some(s) = data.get(&format!("i_{}", i)) {
instances.push_str(s);
i += 1;
}
instances
.split(',')
.filter_map(|s| {
// "".split(",").collect::<Vec<_>>() == [""]
if s.is_empty() {
return None;
}
RemoteIdentity::from_bytes(
&hex::decode(s)
.map_err(|e| {
warn!(
"Unable to parse instance from peer '{peer_id}'s metadata!"
);
e
})
.ok()?,
)
.map_err(|e| {
warn!("Unable to parse instance from peer '{peer_id}'s metadata!");
e
})
.ok()
})
.collect::<Vec<_>>()
},
})
}
}
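
With the instance list gone, the metadata round-trip is just a flat string map and `from_hashmap` no longer needs a `PeerId`. A sketch of the symmetry (test-style; constructing the struct directly assumes module-local visibility, and the `OperatingSystem::Linux` variant is assumed):

let meta = PeerMetadata {
    name: "my-node".into(),
    operating_system: Some(OperatingSystem::Linux), // assumed variant
    version: Some("0.1.0".into()),
};
let map = meta.to_hashmap();
let decoded = PeerMetadata::from_hashmap(&map).expect("round-trip");
assert_eq!(decoded.version.as_deref(), Some("0.1.0"));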

View file

@ -5,9 +5,17 @@ use uuid::Uuid;
use sd_p2p::{
proto::{decode, encode},
spaceblock::{Range, SpaceblockRequests, SpaceblockRequestsError},
spacetunnel::RemoteIdentity,
};
#[derive(Debug, PartialEq, Eq)]
pub struct HeaderFile {
// Request ID
pub(crate) id: Uuid,
pub(crate) library_id: Uuid,
pub(crate) file_path_id: Uuid,
pub(crate) range: Range,
}
/// TODO
#[derive(Debug, PartialEq, Eq)]
pub enum Header {
@ -16,16 +24,7 @@ pub enum Header {
Spacedrop(SpaceblockRequests),
Pair,
Sync(Uuid),
File {
// Request ID
id: Uuid,
library_id: Uuid,
file_path_id: Uuid,
range: Range,
},
// TODO: Remove need for this
Connected(Vec<RemoteIdentity>),
File(HeaderFile),
}
#[derive(Debug, Error)]
@ -58,7 +57,7 @@ impl Header {
.await
.map_err(HeaderError::SyncRequest)?,
)),
4 => Ok(Self::File {
4 => Ok(Self::File(HeaderFile {
id: decode::uuid(stream).await.unwrap(),
library_id: decode::uuid(stream).await.unwrap(),
file_path_id: decode::uuid(stream).await.unwrap(),
@ -71,17 +70,6 @@ impl Header {
}
_ => todo!(),
},
}),
// TODO: Error handling
255 => Ok(Self::Connected({
let len = stream.read_u16_le().await.unwrap();
let mut identities = Vec::with_capacity(len as usize);
for _ in 0..len {
identities.push(
RemoteIdentity::from_bytes(&decode::buf(stream).await.unwrap()).unwrap(),
);
}
identities
})),
d => Err(HeaderError::DiscriminatorInvalid(d)),
}
@ -101,12 +89,12 @@ impl Header {
encode::uuid(&mut bytes, uuid);
bytes
}
Self::File {
Self::File(HeaderFile {
id,
library_id,
file_path_id,
range,
} => {
}) => {
let mut buf = vec![4];
encode::uuid(&mut buf, id);
encode::uuid(&mut buf, library_id);
@ -114,18 +102,6 @@ impl Header {
buf.extend_from_slice(&range.to_bytes());
buf
}
Self::Connected(remote_identities) => {
let mut bytes = vec![255];
if remote_identities.len() > u16::MAX as usize {
panic!("Buf is too long!"); // TODO: Chunk this so it will never error
}
bytes.extend((remote_identities.len() as u16).to_le_bytes());
for identity in remote_identities {
encode::buf(&mut bytes, &identity.to_bytes());
}
bytes
}
}
}
}
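
After this change the wire format is a single discriminator byte followed by the payload, with no `255`/`Connected` case. A round-trip sketch inside an async test, in the style of the crate's other tests (assumes `Range::Full` exists on `sd_p2p::spaceblock::Range`):

let original = Header::File(HeaderFile {
    id: Uuid::new_v4(),
    library_id: Uuid::new_v4(),
    file_path_id: Uuid::new_v4(),
    range: Range::Full, // assumed variant
});
let mut cursor = std::io::Cursor::new(original.to_bytes());
let decoded = Header::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, decoded);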

View file

@ -1,267 +1,33 @@
use std::{collections::HashMap, sync::Arc};
use std::sync::Arc;
use itertools::{Either, Itertools};
use sd_p2p::{
proto::{decode, encode},
spacetunnel::{RemoteIdentity, Tunnel},
DiscoveredPeer, PeerId,
spacetunnel::Tunnel,
};
use sd_sync::CRDTOperation;
use serde::Serialize;
use specta::Type;
use sync::GetOpsArgs;
use tokio::{
io::{AsyncRead, AsyncWrite, AsyncWriteExt},
sync::RwLock,
};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tracing::*;
use uuid::Uuid;
use crate::{
library::{Libraries, Library, LibraryManagerEvent},
sync,
};
use crate::{library::Library, sync};
use super::{Header, IdentityOrRemoteIdentity, P2PManager, PeerMetadata};
use super::{Header, P2PManager};
mod proto;
pub use proto::*;
#[derive(Debug, Clone, Copy, Serialize, Type)]
pub enum InstanceState {
Unavailable,
Discovered(PeerId),
Connected(PeerId),
}
#[derive(Debug, Clone, Serialize, Type)]
pub struct LibraryData {
pub instances: HashMap<RemoteIdentity /* Identity public key */, InstanceState>,
}
type LibrariesMap = HashMap<Uuid /* Library ID */, LibraryData>;
pub struct NetworkedLibraries {
p2p: Arc<P2PManager>,
pub(crate) libraries: RwLock<HashMap<Uuid /* Library ID */, LibraryData>>,
// A list of all instances that this node owns (has the private key for)
owned_instances: RwLock<HashMap<Uuid /* Library ID */, RemoteIdentity>>,
}
impl NetworkedLibraries {
pub fn new(p2p: Arc<P2PManager>, lm: &Libraries) -> Arc<Self> {
let this = Arc::new(Self {
p2p,
libraries: Default::default(),
owned_instances: Default::default(),
});
tokio::spawn({
let this = this.clone();
let rx = lm.rx.clone();
async move {
if let Err(err) = rx
.subscribe(|msg| {
let this = this.clone();
async move {
match msg {
LibraryManagerEvent::Load(library) => {
Self::load_library(&this, &library).await;
}
LibraryManagerEvent::Edit(library) => {
Self::edit_library(&this, &library).await;
}
LibraryManagerEvent::InstancesModified(library) => {
Self::load_library(&this, &library).await;
}
LibraryManagerEvent::Delete(library) => {
Self::delete_library(&this, &library).await;
}
}
}
})
.await
{
error!("Core may become unstable! NetworkedLibraryManager's library manager subscription aborted with error: {err:?}");
}
}
});
this
}
// TODO: Error handling
async fn load_library(self: &Arc<Self>, library: &Library) {
let (db_owned_instances, db_instances): (Vec<_>, Vec<_>) = library
.db
.instance()
.find_many(vec![])
.exec()
.await
.unwrap()
.into_iter()
.partition_map(
// TODO: Error handling
|i| match IdentityOrRemoteIdentity::from_bytes(&i.identity).unwrap() {
IdentityOrRemoteIdentity::Identity(identity) => Either::Left(identity),
IdentityOrRemoteIdentity::RemoteIdentity(identity) => Either::Right(identity),
},
);
// Lock them together to ensure changes to both become visible to readers at the same time
let mut libraries = self.libraries.write().await;
let mut owned_instances = self.owned_instances.write().await;
// `self.owned_instances` exists so this call to `load_library` doesn't override instances of other libraries.
if db_owned_instances.len() != 1 {
panic!(
"Library has '{}' owned instance! Something has gone very wrong!",
db_owned_instances.len()
);
}
owned_instances.insert(library.id, db_owned_instances[0].to_remote_identity());
let mut old_data = libraries.remove(&library.id);
libraries.insert(
library.id,
LibraryData {
// We register all remote instances to track connection state (`IdentityOrRemoteIdentity::RemoteIdentity`s only).
instances: db_instances
.into_iter()
.map(|identity| {
(
identity.clone(),
match old_data
.as_mut()
.and_then(|d| d.instances.remove(&identity))
{
Some(data) => data,
None => InstanceState::Unavailable,
},
)
})
.collect(),
},
);
self.p2p
.update_metadata(owned_instances.values().cloned().collect::<Vec<_>>())
.await;
}
async fn edit_library(&self, _library: &Library) {
// TODO: Send changes to all connected nodes!
// TODO: Update mdns
}
async fn delete_library(&self, library: &Library) {
// Lock them together to ensure changes to both become visible to readers at the same time
let mut libraries = self.libraries.write().await;
let mut owned_instances = self.owned_instances.write().await;
// TODO: Do proper library delete/unpair procedure.
libraries.remove(&library.id);
owned_instances.remove(&library.id);
self.p2p
.update_metadata(owned_instances.values().cloned().collect::<Vec<_>>())
.await;
}
// TODO: Replace all of the following events with a pub/sub system????
pub async fn peer_discovered(&self, event: DiscoveredPeer<PeerMetadata>) {
for lib in self.libraries.write().await.values_mut() {
if let Some((_pk, instance)) = lib
.instances
.iter_mut()
.find(|(pk, _)| event.metadata.instances.iter().any(|pk2| *pk2 == **pk))
{
if !matches!(instance, InstanceState::Connected(_)) {
let should_connect = matches!(instance, InstanceState::Unavailable);
*instance = InstanceState::Discovered(event.peer_id);
if should_connect {
event.dial().await;
}
}
return; // PK can only exist once so we short circuit
}
}
}
pub async fn peer_expired(&self, id: PeerId) {
for lib in self.libraries.write().await.values_mut() {
for instance in lib.instances.values_mut() {
if let InstanceState::Discovered(peer_id) = instance {
if *peer_id == id {
*instance = InstanceState::Unavailable;
}
}
}
}
}
pub async fn peer_connected(&self, peer_id: PeerId) {
// TODO: This is a very suboptimal way of doing this because it assumes a discovery message will always arrive before the connection, which is false.
// TODO: Hence part of the need for `Self::peer_connected2`
for lib in self.libraries.write().await.values_mut() {
for instance in lib.instances.values_mut() {
if let InstanceState::Discovered(id) = instance {
if *id == peer_id {
*instance = InstanceState::Connected(peer_id);
return; // Will only exist once so we short circuit
}
}
}
}
}
// TODO: Remove need for this cause it's weird
pub async fn peer_connected2(&self, instance_id: RemoteIdentity, peer_id: PeerId) {
for lib in self.libraries.write().await.values_mut() {
if let Some(instance) = lib.instances.get_mut(&instance_id) {
*instance = InstanceState::Connected(peer_id);
return; // Will only exist once so we short circuit
}
}
}
pub async fn peer_disconnected(&self, peer_id: PeerId) {
for lib in self.libraries.write().await.values_mut() {
for instance in lib.instances.values_mut() {
if let InstanceState::Connected(id) = instance {
if *id == peer_id {
*instance = InstanceState::Unavailable;
return; // Will only exist once so we short circuit
}
}
}
}
}
pub async fn state(&self) -> LibrariesMap {
self.libraries.read().await.clone()
}
}
// These functions could be moved to some separate protocol abstraction
// which would be pretty cool.
//
// TODO: Error handling
pub use originator::run as originator;
mod originator {
use super::*;
use responder::tx as rx;
use sd_p2p::PeerStatus;
pub mod tx {
use super::*;
#[derive(Debug, PartialEq)]
pub struct Operations(pub Vec<CRDTOperation>);
impl Operations {
@ -283,33 +49,61 @@ mod originator {
buf
}
}
#[cfg(test)]
#[tokio::test]
async fn test() {
{
let original = Operations(vec![]);
let mut cursor = std::io::Cursor::new(original.to_bytes());
let result = Operations::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, result);
}
{
let original = Operations(vec![CRDTOperation {
instance: Uuid::new_v4(),
timestamp: sync::NTP64(0),
id: Uuid::new_v4(),
typ: sd_sync::CRDTOperationType::Shared(sd_sync::SharedOperation {
record_id: serde_json::Value::Null,
model: "name".to_string(),
data: sd_sync::SharedOperationData::Create,
}),
}]);
let mut cursor = std::io::Cursor::new(original.to_bytes());
let result = Operations::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, result);
}
}
}
/// REMEMBER: This only syncs one direction!
pub async fn run(
library_id: Uuid,
sync: &Arc<sync::Manager>,
nlm: &NetworkedLibraries,
p2p: &Arc<super::P2PManager>,
) {
let libraries = nlm.libraries.read().await;
let library = libraries.get(&library_id).unwrap();
pub async fn run(library_id: Uuid, sync: &Arc<sync::Manager>, p2p: &Arc<super::P2PManager>) {
let service = p2p.get_library_service(&library_id).unwrap();
// TODO: Deduplicate any duplicate peer ids -> This is an edge case but still
for instance in library.instances.values() {
let InstanceState::Connected(peer_id) = *instance else {
for (remote_identity, status) in service.get_state() {
let PeerStatus::Connected = status else {
continue;
};
let sync = sync.clone();
let p2p = p2p.clone();
let service = service.clone();
tokio::spawn(async move {
debug!(
"Alerting peer '{peer_id:?}' of new sync events for library '{library_id:?}'"
"Alerting peer '{remote_identity:?}' of new sync events for library '{library_id:?}'"
);
let mut stream = p2p.manager.stream(peer_id).await.map_err(|_| ()).unwrap(); // TODO: handle providing incorrect peer id
let mut stream = service
.connect(p2p.manager.clone(), &remote_identity)
.await
.map_err(|_| ())
.unwrap(); // TODO: handle providing incorrect peer id
stream
.write_all(&Header::Sync(library_id).to_bytes())
@ -350,7 +144,7 @@ mod responder {
use super::*;
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, PartialEq, Debug)]
pub enum MainRequest {
GetOperations(GetOpsArgs),
Done,
@ -374,6 +168,29 @@ mod responder {
buf
}
}
#[cfg(test)]
#[tokio::test]
async fn test() {
{
let original = MainRequest::GetOperations(GetOpsArgs {
clocks: vec![],
count: 0,
});
let mut cursor = std::io::Cursor::new(original.to_bytes());
let result = MainRequest::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, result);
}
{
let original = MainRequest::Done;
let mut cursor = std::io::Cursor::new(original.to_bytes());
let result = MainRequest::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, result);
}
}
}
pub async fn run(stream: &mut (impl AsyncRead + AsyncWrite + Unpin), library: Arc<Library>) {

View file

@ -28,11 +28,6 @@ impl SyncMessage {
#[cfg(test)]
mod tests {
// use sd_core_sync::NTP64;
// use sd_sync::SharedOperation;
// use serde_json::Value;
// use uuid::Uuid;
use super::*;
#[tokio::test]
@ -44,41 +39,5 @@ mod tests {
let result = SyncMessage::from_stream(&mut cursor).await.unwrap();
assert_eq!(original, result);
}
// {
// let original = SyncMessage::OperationsRequest(GetOpsArgs {
// clocks: vec![],
// count: 0,
// });
// let mut cursor = std::io::Cursor::new(original.to_bytes());
// let result = SyncMessage::from_stream(&mut cursor).await.unwrap();
// assert_eq!(original, result);
// }
// {
// let original = SyncMessage::OperationsRequestResponse(vec![]);
// let mut cursor = std::io::Cursor::new(original.to_bytes());
// let result = SyncMessage::from_stream(&mut cursor).await.unwrap();
// assert_eq!(original, result);
// }
// {
// let original = SyncMessage::OperationsRequestResponse(vec![CRDTOperation {
// instance: Uuid::new_v4(),
// timestamp: NTP64(0),
// id: Uuid::new_v4(),
// typ: sd_sync::CRDTOperationType::Shared(SharedOperation {
// record_id: Value::Null,
// model: "name".to_string(),
// data: sd_sync::SharedOperationData::Create,
// }),
// }]);
// let mut cursor = std::io::Cursor::new(original.to_bytes());
// let result = SyncMessage::from_stream(&mut cursor).await.unwrap();
// assert_eq!(original, result);
// }
}
}

View file

@ -1,5 +0,0 @@
// TODO: Define JSON file + open/close it
// TODO: Define messages for process + SPAKE part
// TODO: How is this gonna hook into the frontend?

View file

@ -29,7 +29,7 @@ if-watch = { version = "=3.0.1", features = [
mdns-sd = "0.7.4"
thiserror = "1.0.48"
tracing = { workspace = true }
serde = { version = "1.0.188", features = ["derive"] }
serde = { version = "1.0.188", features = ["derive"] } # TODO: Optional or remove feature
rmp-serde = "1.1.2"
specta = { workspace = true }
flume = "0.10.0" # Must match version used by `mdns-sd`
@ -38,7 +38,13 @@ arc-swap = "1.6.0"
ed25519-dalek = { version = "2.0.0", features = [] }
rand_core = { version = "0.6.4" }
uuid = "1.4.1"
hex = "0.4.3"
streamunordered = "0.5.3"
futures-core = "0.3.29"
tokio-stream = { version = "0.1.14", features = ["sync"] }
pin-project-lite = "0.2.13"
base64 = "0.21.5"
# chacha20poly1305 = "0.10.1"
# rand = "0.8.5"
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }

View file

@ -1,137 +1,141 @@
use std::{collections::HashMap, env, time::Duration};
// use std::{collections::HashMap, env, time::Duration};
use sd_p2p::{Event, Keypair, Manager, Metadata, MetadataManager, PeerId};
use tokio::{io::AsyncReadExt, time::sleep};
use tracing::{debug, error, info};
// use sd_p2p::{Event, Keypair, Manager, Metadata};
// use tokio::{io::AsyncReadExt, time::sleep};
// use tracing::{debug, error, info};
#[derive(Debug, Clone)]
pub struct PeerMetadata {
name: String,
}
impl Metadata for PeerMetadata {
fn to_hashmap(self) -> HashMap<String, String> {
HashMap::from([("name".to_owned(), self.name)])
}
fn from_hashmap(_: &PeerId, data: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized,
{
Ok(Self {
name: data
.get("name")
.ok_or_else(|| {
"DNS record for field 'name' missing. Unable to decode 'PeerMetadata'!"
.to_owned()
})?
.to_owned(),
})
}
}
#[tokio::main]
async fn main() {
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::from_default_env()
.add_directive("basic=trace".parse().unwrap())
.add_directive("sd-p2p=trace".parse().unwrap())
.add_directive("info".parse().unwrap()),
)
.try_init()
.unwrap();
let keypair = Keypair::generate();
let metadata_manager = MetadataManager::new(PeerMetadata {
name: "TODO".to_string(),
});
let (manager, mut stream) =
Manager::new("p2p-demo", &keypair, Default::default(), metadata_manager)
.await
.unwrap();
info!(
"Node '{}' is now online listening at addresses: {:?}",
manager.peer_id(),
manager.listen_addrs().await
);
tokio::spawn(async move {
let mut shutdown = false;
// Your application must keep polling this stream to keep the P2P system running
while let Some(event) = stream.next().await {
match event {
Event::PeerDiscovered(event) => {
println!(
"Discovered peer by id '{}' with address '{:?}' and metadata: {:?}",
event.peer_id, event.addresses, event.metadata
);
event.dial().await; // We connect to everyone we find on the network. Your app will probs wanna restrict this!
}
Event::PeerMessage(mut event) => {
debug!("Peer '{}' established unicast stream", event.peer_id);
tokio::spawn(async move {
let mut buf = [0; 100];
let n = event.stream.read(&mut buf).await.unwrap();
println!("GOT UNICAST: {:?}", std::str::from_utf8(&buf[..n]).unwrap());
});
}
Event::PeerBroadcast(mut event) => {
debug!("Peer '{}' established broadcast stream", event.peer_id);
tokio::spawn(async move {
let mut buf = [0; 100];
let n = event.stream.read(&mut buf).await.unwrap();
println!(
"GOT BROADCAST: {:?}",
std::str::from_utf8(&buf[..n]).unwrap()
);
});
}
Event::Shutdown => {
info!("Manager shutdown!");
shutdown = true;
break;
}
_ => debug!("event: {:?}", event),
}
}
if !shutdown {
error!("Manager event stream closed! The core is unstable from this point forward!");
// process.exit(1); // TODO: Should I?
}
});
if env::var("PING").as_deref() != Ok("skip") {
let manager = manager.clone();
tokio::spawn(async move {
sleep(Duration::from_millis(500)).await;
// Send pings to every client every 3 seconds after startup
loop {
sleep(Duration::from_secs(3)).await;
manager
.broadcast(
format!("Hello World From {}", keypair.peer_id())
.as_bytes()
.to_vec(),
)
.await;
debug!("Sent ping broadcast to all connected peers!");
}
});
}
// TODO: proper shutdown
// https://docs.rs/ctrlc/latest/ctrlc/
// https://docs.rs/system_shutdown/latest/system_shutdown/
tokio::time::sleep(Duration::from_secs(100)).await;
manager.shutdown().await; // It is super highly recommended to shut down the manager before exiting your application so an mDNS update can be broadcast
// #[derive(Debug, Clone)]
// pub struct PeerMetadata {
// name: String,
// }
// impl Metadata for PeerMetadata {
// fn to_hashmap(self) -> HashMap<String, String> {
// HashMap::from([("name".to_owned(), self.name)])
// }
// fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
// where
// Self: Sized,
// {
// Ok(Self {
// name: data
// .get("name")
// .ok_or_else(|| {
// "DNS record for field 'name' missing. Unable to decode 'PeerMetadata'!"
// .to_owned()
// })?
// .to_owned(),
// })
// }
// }
// #[tokio::main]
// async fn main() {
// tracing_subscriber::fmt()
// .with_env_filter(
// tracing_subscriber::EnvFilter::from_default_env()
// .add_directive("basic=trace".parse().unwrap())
// .add_directive("sd-p2p=trace".parse().unwrap())
// .add_directive("info".parse().unwrap()),
// )
// .try_init()
// .unwrap();
// let keypair = Keypair::generate();
// let metadata_manager = MetadataManager::new(PeerMetadata {
// name: "TODO".to_string(),
// });
// let (manager, mut stream) = Manager::new("p2p-demo", &keypair, Default::default())
// .await
// .unwrap();
// info!(
// "Node '{}' is now online listening at addresses: {:?}",
// manager.identity(),
// stream.listen_addrs()
// );
// tokio::spawn(async move {
// let mut shutdown = false;
// // Your application must keep polling this stream to keep the P2P system running
// while let Some(event) = stream.next().await {
// match event {
// // TODO: Refactor example to use `Service` struct
// // Event::PeerDiscovered(event) => {
// // println!(
// // "Discovered peer by id '{}' with address '{:?}' and metadata: {:?}",
// // event.peer_id, event.addresses, event.metadata
// // );
// // event.dial().await; // We connect to everyone we find on the network. Your app will probs wanna restrict this!
// // }
// Event::PeerMessage(mut event) => {
// debug!("Peer '{}' established unicast stream", event.identity);
// tokio::spawn(async move {
// let mut buf = [0; 100];
// let n = event.stream.read(&mut buf).await.unwrap();
// println!("GOT UNICAST: {:?}", std::str::from_utf8(&buf[..n]).unwrap());
// });
// }
// Event::PeerBroadcast(mut event) => {
// debug!("Peer '{}' established broadcast stream", event.identity);
// tokio::spawn(async move {
// let mut buf = [0; 100];
// let n = event.stream.read(&mut buf).await.unwrap();
// println!(
// "GOT BROADCAST: {:?}",
// std::str::from_utf8(&buf[..n]).unwrap()
// );
// });
// }
// Event::Shutdown => {
// info!("Manager shutdown!");
// shutdown = true;
// break;
// }
// _ => debug!("event: {:?}", event),
// }
// }
// if !shutdown {
// error!("Manager event stream closed! The core is unstable from this point forward!");
// // process.exit(1); // TODO: Should I?
// }
// });
// if env::var("PING").as_deref() != Ok("skip") {
// let manager = manager.clone();
// tokio::spawn(async move {
// sleep(Duration::from_millis(500)).await;
// // Send pings to every client every 3 seconds after startup
// loop {
// sleep(Duration::from_secs(3)).await;
// manager
// .broadcast(
// format!("Hello World From {}", keypair.peer_id())
// .as_bytes()
// .to_vec(),
// )
// .await;
// debug!("Sent ping broadcast to all connected peers!");
// }
// });
// }
// // TODO: proper shutdown
// // https://docs.rs/ctrlc/latest/ctrlc/
// // https://docs.rs/system_shutdown/latest/system_shutdown/
// tokio::time::sleep(Duration::from_secs(100)).await;
// manager.shutdown().await; // It is super highly recommended to shut down the manager before exiting your application so an mDNS update can be broadcast
// }
fn main() {
todo!("TODO: Update example");
}
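
Until the example is rewritten, a rough shape of what it could look like on the new API (kept commented out like the rest of the file; entirely a sketch and untested):

// #[tokio::main]
// async fn main() {
//     let keypair = Keypair::generate();
//     let (manager, mut stream) = Manager::new("p2p-demo", &keypair, Default::default())
//         .await
//         .unwrap();
//     let service = Service::<PeerMetadata>::new("p2p-demo", manager.clone()).unwrap();
//     service.update(PeerMetadata { name: "TODO".into() });
//     while let Some(event) = stream.next().await {
//         // match on Event::PeerConnected / PeerMessage / Shutdown as before
//     }
//     manager.shutdown().await;
// }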

View file

@ -0,0 +1,155 @@
use std::{
collections::{HashMap, HashSet},
future::poll_fn,
net::SocketAddr,
sync::{Arc, PoisonError, RwLock},
task::Poll,
};
use libp2p::PeerId;
use tokio::sync::{broadcast, mpsc};
use tracing::trace;
use crate::{spacetunnel::RemoteIdentity, ManagerConfig, Mdns, ServiceEventInternal};
type ServiceName = String;
pub(crate) type ListenAddrs = HashSet<SocketAddr>;
pub(crate) type State = Arc<RwLock<DiscoveryManagerState>>;
/// DiscoveryManager controls all user-defined [Service]s and connects them with the network through mDNS and other discovery protocols.
pub(crate) struct DiscoveryManager {
pub(crate) state: State,
pub(crate) listen_addrs: ListenAddrs,
pub(crate) application_name: &'static str,
pub(crate) identity: RemoteIdentity,
pub(crate) peer_id: PeerId,
pub(crate) mdns: Option<Mdns>,
// TODO: Split these off `DiscoveryManagerState` and pass around on their own struct???
pub(crate) do_broadcast_rx: broadcast::Receiver<()>,
pub(crate) service_shutdown_rx: mpsc::Receiver<String>,
}
impl DiscoveryManager {
pub(crate) fn new(
application_name: &'static str,
identity: RemoteIdentity,
peer_id: PeerId,
config: &ManagerConfig,
state: State,
service_shutdown_rx: mpsc::Receiver<String>,
) -> Result<Self, mdns_sd::Error> {
let mut mdns = None;
if config.enabled {
mdns = Some(Mdns::new(application_name, identity, peer_id)?);
}
let do_broadcast_rx = state
.read()
.unwrap_or_else(PoisonError::into_inner)
.do_broadcast
.subscribe();
Ok(Self {
state,
listen_addrs: Default::default(),
application_name,
identity,
peer_id,
mdns,
do_broadcast_rx,
service_shutdown_rx,
})
}
/// Called on changes to `self.services` to make sure all providers update their records.
pub(crate) fn do_advertisement(&mut self) {
trace!("Broadcasting new service records");
if let Some(mdns) = &mut self.mdns {
mdns.do_advertisement(&self.listen_addrs, &self.state);
}
}
pub(crate) async fn poll(&mut self) {
tokio::select! {
_ = self.do_broadcast_rx.recv() => self.do_advertisement(),
service_name = self.service_shutdown_rx.recv() => {
if let Some(service_name) = service_name {
let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner);
state.services.remove(&service_name);
state.discovered.remove(&service_name);
state.known.remove(&service_name);
}
// TODO
self.do_advertisement();
}
_ = poll_fn(|cx| {
if let Some(mdns) = &mut self.mdns {
return mdns.poll(cx, &self.listen_addrs, &self.state);
}
Poll::Pending
}) => {},
}
}
pub(crate) fn shutdown(&self) {
if let Some(mdns) = &self.mdns {
mdns.shutdown();
}
}
}
#[derive(Debug, Clone)]
#[allow(clippy::type_complexity)]
pub(crate) struct DiscoveryManagerState {
/// A list of services the current node is advertising w/ their metadata
pub(crate) services: HashMap<
ServiceName,
(
broadcast::Sender<(String, ServiceEventInternal)>,
// Will be `None` prior to the first `.set` call
Option<HashMap<String, String>>,
),
>,
/// A map of organically discovered peers
pub(crate) discovered: HashMap<ServiceName, HashMap<RemoteIdentity, DiscoveredPeerCandidate>>,
/// A map of peers we know about. These may be connected or not available.
/// This is designed around the Relay/NAT hole punching service where we need to emit who we wanna discover
/// Note: this may contain duplicates with `discovered` as they will *not* be removed from here when found
pub(crate) known: HashMap<ServiceName, HashSet<RemoteIdentity>>,
/// Used to trigger a rebroadcast. This should be called when mutating this struct.
/// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it.
/// This is a channel with a capacity of 1. If sending fails we know someone else has already requested broadcast and we can ignore the error.
pub(crate) do_broadcast: broadcast::Sender<()>,
/// Used to trigger the removal of a `Service`. This is used in the `impl Drop for Service`
/// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it.
pub(crate) service_shutdown_tx: mpsc::Sender<String>,
}
impl DiscoveryManagerState {
pub fn new() -> (Arc<RwLock<Self>>, mpsc::Receiver<String>) {
let (service_shutdown_tx, service_shutdown_rx) = mpsc::channel(10);
(
Arc::new(RwLock::new(Self {
services: Default::default(),
discovered: Default::default(),
known: Default::default(),
do_broadcast: broadcast::channel(1).0,
service_shutdown_tx,
})),
service_shutdown_rx,
)
}
}
#[derive(Debug, Clone)]
pub(crate) struct DiscoveredPeerCandidate {
pub(crate) peer_id: PeerId,
pub(crate) meta: HashMap<String, String>,
pub(crate) addresses: Vec<SocketAddr>,
}
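
Wiring sketch for the pieces above (values such as `identity`, `peer_id`, and `config` are assumed to be in scope; note that `config.enabled == false` skips constructing `Mdns` entirely, which is the new discovery kill-switch):

let (state, service_shutdown_rx) = DiscoveryManagerState::new();
let mut discovery = DiscoveryManager::new(
    "spacedrive", // application_name
    identity,     // RemoteIdentity
    peer_id,      // libp2p::PeerId
    &config,      // ManagerConfig
    state.clone(),
    service_shutdown_rx,
)?;
// Drive it alongside the rest of the manager's event loop (crate-internal API).
loop {
    discovery.poll().await;
}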

View file

@ -1,102 +1,68 @@
use std::{
collections::{HashMap, HashSet},
collections::HashMap,
net::{IpAddr, SocketAddr},
pin::Pin,
str::FromStr,
sync::Arc,
sync::PoisonError,
task::{Context, Poll},
thread::sleep,
time::Duration,
};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use tokio::{
sync::{mpsc, RwLock},
time::{sleep_until, Instant, Sleep},
use futures_core::Stream;
use libp2p::{
futures::{FutureExt, StreamExt},
PeerId,
};
use tracing::{debug, error, trace, warn};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use streamunordered::{StreamUnordered, StreamYield};
use tokio::time::{sleep_until, Instant, Sleep};
use tracing::{error, trace, warn};
use crate::{DiscoveredPeer, Event, Manager, Metadata, MetadataManager, PeerId};
use crate::{
spacetunnel::RemoteIdentity, DiscoveredPeerCandidate, ListenAddrs, ServiceEventInternal, State,
};
/// TODO
const MDNS_READVERTISEMENT_INTERVAL: Duration = Duration::from_secs(60); // Every minute re-advertise
/// TODO
#[derive(Debug)]
pub struct MdnsState<TMetadata: Metadata> {
pub discovered: RwLock<HashMap<PeerId, DiscoveredPeer<TMetadata>>>,
pub listen_addrs: RwLock<HashSet<SocketAddr>>,
}
/// TODO
pub struct Mdns<TMetadata>
where
TMetadata: Metadata,
{
// used to ignore events from our own mdns advertisement
pub(crate) struct Mdns {
identity: RemoteIdentity,
peer_id: PeerId,
metadata_manager: Arc<MetadataManager<TMetadata>>,
mdns_daemon: ServiceDaemon,
mdns_service_receiver: flume::Receiver<ServiceEvent>,
service_name: String,
advertised_services: Vec<String>,
mdns_daemon: ServiceDaemon,
next_mdns_advertisement: Pin<Box<Sleep>>,
next_allowed_discovery_advertisement: Instant,
trigger_advertisement: mpsc::UnboundedReceiver<()>,
pub(crate) state: Arc<MdnsState<TMetadata>>,
// This is an ugly workaround for: https://github.com/keepsimple1/mdns-sd/issues/145
mdns_rx: StreamUnordered<MdnsRecv>,
}
impl<TMetadata> Mdns<TMetadata>
where
TMetadata: Metadata,
{
pub async fn new(
impl Mdns {
pub(crate) fn new(
application_name: &'static str,
identity: RemoteIdentity,
peer_id: PeerId,
metadata_manager: Arc<MetadataManager<TMetadata>>,
) -> Result<(Self, Arc<MdnsState<TMetadata>>), mdns_sd::Error> {
) -> Result<Self, mdns_sd::Error> {
let mdns_daemon = ServiceDaemon::new()?;
let service_name = format!("_{}._udp.local.", application_name);
let mdns_service_receiver = mdns_daemon.browse(&service_name)?;
let (advertise_tx, advertise_rx) = mpsc::unbounded_channel();
metadata_manager.set_tx(advertise_tx).await;
let state = Arc::new(MdnsState {
discovered: RwLock::new(Default::default()),
listen_addrs: RwLock::new(Default::default()),
});
Ok((
Self {
peer_id,
metadata_manager,
mdns_daemon,
mdns_service_receiver,
service_name,
next_mdns_advertisement: Box::pin(sleep_until(Instant::now())), // Trigger an advertisement immediately
next_allowed_discovery_advertisement: Instant::now(),
trigger_advertisement: advertise_rx,
state: state.clone(),
},
state,
))
}
pub fn unregister_mdns(&self) -> mdns_sd::Result<mdns_sd::Receiver<mdns_sd::UnregisterStatus>> {
self.mdns_daemon
.unregister(&format!("{}.{}", self.peer_id, self.service_name))
Ok(Self {
identity,
peer_id,
service_name: format!("_{}._udp.local.", application_name),
advertised_services: Vec::new(),
mdns_daemon,
next_mdns_advertisement: Box::pin(sleep_until(Instant::now())), // Trigger an advertisement immediately
mdns_rx: StreamUnordered::new(),
})
}
/// Do an mdns advertisement to the network.
async fn advertise(&mut self) {
self.inner_advertise().await;
pub(super) fn do_advertisement(&mut self, listen_addrs: &ListenAddrs, state: &State) {
trace!("doing mDNS advertisement!");
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL));
}
async fn inner_advertise(&self) {
let metadata = self.metadata_manager.get().to_hashmap();
// TODO: Second stage rate-limit
let mut ports_to_service = HashMap::new();
let listen_addrs = self.state.listen_addrs.read().await;
for addr in listen_addrs.iter() {
let addr = match addr {
SocketAddr::V4(addr) => addr,
@ -111,193 +77,258 @@ where
.push(addr.ip());
}
// This method takes `&mut self` so we know we have exclusive access to `advertised_services`
let mut advertised_services_to_remove = self.advertised_services.clone();
let state = state.read().unwrap_or_else(PoisonError::into_inner);
for (port, ips) in ports_to_service.into_iter() {
let service = match ServiceInfo::new(
&self.service_name,
&self.peer_id.to_string(),
&format!("{}.", self.peer_id),
&*ips,
port,
Some(metadata.clone()), // TODO: Prevent the user defining a value that overflows a DNS record
) {
Ok(service) => service,
Err(err) => {
warn!("error creating mdns service info: {}", err);
for (service_name, (_, metadata)) in &state.services {
let Some(metadata) = metadata else {
continue;
};
let service_domain =
// TODO: Use "Selective Instance Enumeration" instead in future but right now it is causing `TMeta` to get garbled.
// format!("{service_name}._sub._{}", self.service_name)
format!("{service_name}._sub._{service_name}{}", self.service_name);
let mut meta = metadata.clone();
meta.insert("__peer_id".into(), self.peer_id.to_string());
let service = match ServiceInfo::new(
&service_domain,
&self.identity.to_string(), // TODO: This shows up in `fullname` without sub service. Is that a problem???
&format!("{}.{}.", service_name, self.identity), // TODO: Should this change???
&*ips, // TODO: &[] as &[Ipv4Addr],
port,
Some(meta.clone()), // TODO: Prevent the user defining a value that overflows a DNS record
) {
Ok(service) => service, // TODO: .enable_addr_auto(), // TODO: using autoaddrs or not???
Err(err) => {
warn!("error creating mdns service info: {}", err);
continue;
}
};
let service_name = service.get_fullname().to_string();
advertised_services_to_remove.retain(|s| *s != service_name);
self.advertised_services.push(service_name);
if !self
.mdns_rx
.iter_with_token()
.any(|(s, _)| s.1 == service_domain)
{
self.mdns_rx.insert(MdnsRecv(
self.mdns_daemon
.browse(&service_domain)
.unwrap()
.into_stream(),
service_domain,
));
}
};
trace!("advertising mdns service: {:?}", service);
match self.mdns_daemon.register(service) {
Ok(_) => {}
Err(err) => warn!("error registering mdns service: {}", err),
}
}
}
// TODO: if the channel's sender is dropped will this cause the `tokio::select` in `manager.rs` to loop infinitely?
pub async fn poll(&mut self, manager: &Arc<Manager<TMetadata>>) -> Option<Event<TMetadata>> {
tokio::select! {
_ = &mut self.next_mdns_advertisement => self.advertise().await,
_ = self.trigger_advertisement.recv() => self.advertise().await,
event = self.mdns_service_receiver.recv_async() => {
let event = event.unwrap(); // TODO: Error handling
match event {
ServiceEvent::SearchStarted(_) => {}
ServiceEvent::ServiceFound(_, _) => {}
ServiceEvent::ServiceResolved(info) => {
let raw_peer_id = info
.get_fullname()
.replace(&format!(".{}", self.service_name), "");
match PeerId::from_str(&raw_peer_id) {
Ok(peer_id) => {
// Prevent discovery of the current peer.
if peer_id == self.peer_id {
return None;
}
match TMetadata::from_hashmap(
&peer_id,
&info
.get_properties()
.iter()
.map(|v| (v.key().to_owned(), v.val_str().to_owned()))
.collect(),
) {
Ok(metadata) => {
let peer = {
let mut discovered_peers =
self.state.discovered.write().await;
let peer = if let Some(peer) = discovered_peers.remove(&peer_id) {
peer
} else {
// Found a new peer, let's readvertise our mdns service as it may have just come online
// `self.last_discovery_advertisement` is to prevent DOS-style attacks.
let now = Instant::now();
if self.next_allowed_discovery_advertisement <= now {
self.next_allowed_discovery_advertisement = now + Duration::from_secs(1);
self.inner_advertise().await;
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL));
}
DiscoveredPeer {
manager: manager.clone(),
peer_id,
metadata,
addresses: info
.get_addresses()
.iter()
.map(|addr| {
SocketAddr::new(
IpAddr::V4(*addr),
info.get_port(),
)
})
.collect(),
}
};
discovered_peers.insert(peer_id, peer.clone());
peer
};
debug!(
"Discovered peer by id '{}' with address '{:?}' and metadata: {:?}",
peer.peer_id, peer.addresses, peer.metadata
);
return Some(Event::PeerDiscovered(peer));
}
Err(err) => error!("error parsing metadata for peer '{}': {}", raw_peer_id, err)
}
}
Err(_) => warn!(
"resolved peer advertising itself with an invalid peer_id '{}'",
raw_peer_id
),
}
}
ServiceEvent::ServiceRemoved(_, fullname) => {
let raw_peer_id = fullname.replace(&format!(".{}", self.service_name), "");
match PeerId::from_str(&raw_peer_id) {
Ok(peer_id) => {
// Prevent discovery of the current peer.
if peer_id == self.peer_id {
return None;
}
{
let mut discovered_peers =
self.state.discovered.write().await;
let peer = discovered_peers.remove(&peer_id);
let metadata = peer.map(|p| p.metadata);
debug!("Peer '{peer_id}' expired with metadata: {metadata:?}");
return Some(Event::PeerExpired {
id: peer_id,
metadata,
});
}
}
Err(_) => warn!(
"resolved peer de-advertising itself with an invalid peer_id '{}'",
raw_peer_id
),
}
}
ServiceEvent::SearchStopped(_) => {}
// TODO: Do a proper diff and remove old services
trace!("advertising mdns service: {:?}", service);
match self.mdns_daemon.register(service) {
Ok(_) => {}
Err(err) => warn!("error registering mdns service: {}", err),
}
}
};
None
}
pub async fn register_addr(&mut self, addr: SocketAddr) {
self.state.listen_addrs.write().await.insert(addr);
// If the next mdns advertisement is more than 250ms away, then we should queue one closer to now.
// This acts as a debounce for advertisements when many addresses are discovered close to each other (Eg. at startup)
if self.next_mdns_advertisement.deadline() > (Instant::now() + Duration::from_millis(250)) {
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + Duration::from_millis(200)));
}
}
pub async fn unregister_addr(&mut self, addr: &SocketAddr) {
self.state.listen_addrs.write().await.remove(addr);
// If the next mdns advertisement is more than 250ms away, then we should queue one closer to now.
// This acts as a debounce for advertisements when many addresses are discovered close to each other (Eg. at startup)
if self.next_mdns_advertisement.deadline() > (Instant::now() + Duration::from_millis(250)) {
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + Duration::from_millis(200)));
}
}
pub async fn shutdown(&self) {
match self
.mdns_daemon
.unregister(&format!("{}.{}", self.peer_id, self.service_name))
.map(|chan| chan.recv())
{
Ok(Ok(_)) => {}
Ok(Err(err)) => {
warn!(
"shutdown error recieving shutdown status from mdns service: {}",
err
);
for service_domain in advertised_services_to_remove {
if let Some((_, token)) = self
.mdns_rx
.iter_with_token()
.find(|(s, _)| s.1 == service_domain)
{
Pin::new(&mut self.mdns_rx).remove(token);
}
Err(err) => {
warn!("shutdown error unregistering mdns service: {}", err);
self.mdns_daemon.unregister(&service_domain).unwrap();
}
// If an mDNS advertisement is not queued in the future, queue one
if self.next_mdns_advertisement.is_elapsed() {
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL));
}
}
pub(crate) fn poll(
&mut self,
cx: &mut Context<'_>,
listen_addrs: &ListenAddrs,
state: &State,
) -> Poll<()> {
let mut is_pending = false;
while !is_pending {
match self.next_mdns_advertisement.poll_unpin(cx) {
Poll::Ready(()) => self.do_advertisement(listen_addrs, state),
Poll::Pending => is_pending = true,
}
match self.mdns_rx.poll_next_unpin(cx) {
Poll::Ready(Some((result, _))) => match result {
StreamYield::Item(event) => self.on_event(event, state),
StreamYield::Finished(_) => {}
},
Poll::Ready(None) => {}
Poll::Pending => is_pending = true,
}
}
Poll::Pending
}
fn on_event(&mut self, event: ServiceEvent, state: &State) {
match event {
ServiceEvent::SearchStarted(_) => {}
ServiceEvent::ServiceFound(_, _) => {}
ServiceEvent::ServiceResolved(info) => {
let Some(subdomain) = info.get_subtype() else {
warn!("resolved mDNS peer advertising itself with missing subservice");
return;
};
let service_name = subdomain.split("._sub.").next().unwrap(); // TODO: .replace(&format!("._sub.{}", self.service_name), "");
let raw_remote_identity = info
.get_fullname()
.replace(&format!("._{service_name}{}", self.service_name), "");
let Ok(identity) = RemoteIdentity::from_str(&raw_remote_identity) else {
warn!(
"resolved peer advertising itself with an invalid RemoteIdentity('{}')",
raw_remote_identity
);
return;
};
// Prevent discovery of the current peer.
if identity == self.identity {
return;
}
let mut meta = info
.get_properties()
.iter()
.map(|v| (v.key().to_owned(), v.val_str().to_owned()))
.collect::<HashMap<_, _>>();
let Some(peer_id) = meta.remove("__peer_id") else {
warn!(
"resolved mDNS peer advertising itself with missing '__peer_id' metadata"
);
return;
};
let Ok(peer_id) = PeerId::from_str(&peer_id) else {
warn!(
"resolved mDNS peer advertising itself with invalid '__peer_id' metadata"
);
return;
};
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if let Some((tx, _)) = state.services.get_mut(service_name) {
tx.send((
service_name.to_string(),
ServiceEventInternal::Discovered {
identity,
metadata: meta.clone(),
},
))
.unwrap();
} else {
warn!(
"mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!"
);
}
if let Some(discovered) = state.discovered.get_mut(service_name) {
discovered.insert(
identity,
DiscoveredPeerCandidate {
peer_id,
meta,
addresses: info
.get_addresses()
.iter()
.map(|addr| SocketAddr::new(IpAddr::V4(*addr), info.get_port()))
.collect(),
},
);
} else {
warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!");
}
}
ServiceEvent::ServiceRemoved(service_type, fullname) => {
let service_name = service_type.split("._sub.").next().unwrap();
let raw_remote_identity =
fullname.replace(&format!("._{service_name}{}", self.service_name), "");
let Ok(identity) = RemoteIdentity::from_str(&raw_remote_identity) else {
warn!(
"resolved peer deadvertising itself with an invalid RemoteIdentity('{}')",
raw_remote_identity
);
return;
};
// Prevent discovery of the current peer.
if identity == self.identity {
return;
}
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if let Some((tx, _)) = state.services.get_mut(service_name) {
tx.send((
service_name.to_string(),
ServiceEventInternal::Expired { identity },
))
.unwrap();
} else {
warn!(
"mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!"
);
}
if let Some(discovered) = state.discovered.get_mut(service_name) {
discovered.remove(&identity);
} else {
warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!");
}
}
ServiceEvent::SearchStopped(_) => {}
}
}
pub(crate) fn shutdown(&self) {
for service in &self.advertised_services {
self.mdns_daemon
.unregister(service)
.map_err(|err| {
error!("error removing mdns service '{service}': {err}");
})
.ok();
}
// TODO: Without this mDNS is not sending its goodbye packets without a timeout. Try and remove this cause it makes shutdown slow.
sleep(Duration::from_millis(100));
self.mdns_daemon.shutdown().unwrap_or_else(|err| {
error!("shutdown error shutting down mdns daemon: {}", err);
error!("error shutting down mdns daemon: {err}");
});
}
}
struct MdnsRecv(flume::r#async::RecvStream<'static, ServiceEvent>, String);
impl Stream for MdnsRecv {
type Item = ServiceEvent;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.0.poll_next_unpin(cx)
}
}
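
For reference, the sub-service domain assembled in `do_advertisement` above expands like this (example values assumed):

// application_name = "spacedrive"  -> self.service_name = "_spacedrive._udp.local."
// service_name     = "libraries"   -> service_domain =
//     "libraries._sub._libraries_spacedrive._udp.local."
// Each record also carries a "__peer_id" property, so the libp2p PeerId
// survives the trip even though discovery itself is keyed on RemoteIdentity.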

View file

@ -1,50 +0,0 @@
use std::{fmt, sync::Arc};
use arc_swap::ArcSwap;
use tokio::sync::{mpsc, OnceCell};
use tracing::warn;
use crate::Metadata;
/// A wrapper around `ArcSwap` that provides an API for the application to update the metadata about the current device.
/// This wrapper exists to ensure we ask the MDNS service to re-advertise the new metadata on change.
pub struct MetadataManager<TMetadata: Metadata>(
ArcSwap<TMetadata>,
// Starts out `None` cause this is constructed in userspace but when passed into `Manager::new` this will be set.
OnceCell<mpsc::UnboundedSender<()>>,
);
impl<TMetadata: Metadata + fmt::Debug> fmt::Debug for MetadataManager<TMetadata> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("MetadataManager")
.field("metadata", &self.0.load())
.finish()
}
}
impl<TMetadata: Metadata> MetadataManager<TMetadata> {
pub fn new(metadata: TMetadata) -> Arc<Self> {
Arc::new(Self(ArcSwap::new(Arc::new(metadata)), OnceCell::default()))
}
pub(crate) async fn set_tx(&self, tx: mpsc::UnboundedSender<()>) {
self.1.get_or_init(move || async move { tx }).await;
}
/// Returns a copy of the current metadata
pub fn get(&self) -> TMetadata {
TMetadata::clone(&self.0.load())
}
/// Updates the metadata and asks the MDNS service to re-advertise the new metadata
pub fn update(&self, metadata: TMetadata) {
self.0.store(Arc::new(metadata));
if let Some(chan) = self.1.get() {
chan.send(())
.map_err(|_| {
warn!("'MetadataManager' failed to ask the MDNS server to re-advertise!");
})
.ok();
}
}
}

View file

@ -1,5 +1,7 @@
mod manager;
mod mdns;
mod metadata_manager;
mod service;
pub use mdns::*;
pub use metadata_manager::*;
pub(crate) use manager::*;
pub(crate) use mdns::*;
pub use service::*;

View file

@ -0,0 +1,298 @@
use std::{
collections::HashMap,
marker::PhantomData,
pin::Pin,
sync::{Arc, PoisonError, RwLock},
task::{Context, Poll},
};
use futures_core::Stream;
use libp2p::futures::StreamExt;
use pin_project_lite::pin_project;
use thiserror::Error;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
use tracing::warn;
use crate::{
spacetime::UnicastStream, spacetunnel::RemoteIdentity, DiscoveredPeer, DiscoveryManagerState,
Manager, Metadata,
};
/// A Service represents a thing your application exposes to the network that can be discovered and connected to.
pub struct Service<TMeta> {
name: String,
state: Arc<RwLock<DiscoveryManagerState>>,
do_broadcast: broadcast::Sender<()>,
service_shutdown_tx: mpsc::Sender<String>,
manager: Arc<Manager>,
phantom: PhantomData<fn() -> TMeta>,
}
impl<TMeta: Metadata> Service<TMeta> {
/// Construct a new service. This will not cause an advertisement until [Self::update] is called!
pub fn new(
name: impl Into<String>,
manager: Arc<Manager>,
) -> Result<Self, ErrDuplicateServiceName> {
let name = name.into();
let state = manager.discovery_state.clone();
let (do_broadcast, service_shutdown_tx) = {
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if state.services.contains_key(&name) {
return Err(ErrDuplicateServiceName);
}
state.discovered.insert(name.clone(), Default::default());
state
.services
.insert(name.clone(), (broadcast::channel(20).0, Default::default()));
(
state.do_broadcast.clone(),
state.service_shutdown_tx.clone(),
)
};
// TODO: We call this but it won't have metadata set so it won't actually expose it
// However, it must be called to properly set up the listener (at least right now)
do_broadcast.send(()).ok();
Ok(Self {
name,
state,
do_broadcast,
service_shutdown_tx,
manager,
phantom: PhantomData,
})
}
pub fn name(&self) -> &str {
&self.name
}
pub fn update(&self, meta: TMeta) {
if let Some((_, services_meta)) = self
.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.services
.get_mut(&self.name)
{
let meta = meta.to_hashmap();
let did_change = services_meta.as_ref().map(|v| *v != meta).unwrap_or(true); // re-broadcast when the metadata changed or was just set for the first time
*services_meta = Some(meta);
if did_change {
self.do_broadcast.send(()).ok();
}
} else {
warn!(
"Service::update called on non-existent service '{}'. This indicates a major bug in P2P!",
self.name
);
}
}
pub fn get_state(&self) -> HashMap<RemoteIdentity, PeerStatus> {
let connected = self
.manager
.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.connected
.values()
.map(|remote_identity| (*remote_identity, PeerStatus::Connected))
.collect::<Vec<_>>();
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
state
.known
.get(&self.name)
.into_iter()
.flatten()
.map(|remote_identity| (*remote_identity, PeerStatus::Unavailable))
// We do these after the `Unavailable` to replace the keys that are in both
.chain(connected)
.chain(
state
.discovered
.get(&self.name)
.into_iter()
.flatten()
.map(|(remote_identity, _)| (*remote_identity, PeerStatus::Discovered)),
)
.collect::<HashMap<RemoteIdentity, PeerStatus>>()
}
pub fn add_known(&self, identity: Vec<RemoteIdentity>) {
self.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.known
.entry(self.name.clone())
.or_default()
.extend(identity);
// TODO: Probably signal to discovery manager that we have new known peers -> This will be needed for Relay but not for mDNS
}
// TODO: Remove in favor of `get_state` maybe???
pub fn get_discovered(&self) -> Vec<DiscoveredPeer<TMeta>> {
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.discovered
.get(&self.name)
.into_iter()
.flatten()
.map(|(i, p)| DiscoveredPeer {
identity: *i,
peer_id: p.peer_id,
metadata: TMeta::from_hashmap(&p.meta).unwrap(),
addresses: p.addresses.clone(),
})
.collect::<Vec<_>>()
}
pub async fn connect(
&self,
manager: Arc<Manager>,
identity: &RemoteIdentity,
) -> Result<UnicastStream, ()> {
let candidate = {
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
let (_, candidate) = state
.discovered
.get(&self.name)
.ok_or(())?
.iter()
.find(|(i, _)| *i == identity)
.ok_or(())?;
candidate.clone()
};
let stream = manager.stream_inner(candidate.peer_id).await.unwrap(); // TODO: handle providing incorrect peer id
Ok(stream)
}
pub fn listen(&self) -> ServiceSubscription<TMeta> {
ServiceSubscription {
name: self.name.clone(),
rx: BroadcastStream::new(
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.services
.get(&self.name)
.unwrap() // TODO: Error handling
.0
.subscribe(),
),
phantom: PhantomData,
}
}
}
impl<Meta> Drop for Service<Meta> {
fn drop(&mut self) {
if self
.service_shutdown_tx
.try_send(self.name.clone())
.is_err()
{
// TODO: This will happen on shutdown due to the shutdown order. Try and fix that!
// Functionally all services are shutdown by the manager so this is a cosmetic fix.
warn!(
"Service::drop could not be called on '{}'. This indicates contention on the service shutdown channel and will result in out-of-date services being broadcasted.",
self.name
);
}
}
}
#[derive(Debug, Error)]
#[error("a service has already been mounted with this name")]
pub struct ErrDuplicateServiceName;
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "specta", derive(specta::Type))]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub enum PeerStatus {
Unavailable,
Discovered,
Connected,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "specta", derive(specta::Type))]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub enum ServiceEvent<TMeta> {
Discovered {
identity: RemoteIdentity,
metadata: TMeta,
},
Expired {
identity: RemoteIdentity,
},
}
/// Type-erased version of [ServiceEvent].
#[derive(Debug, Clone)]
pub(crate) enum ServiceEventInternal {
Discovered {
identity: RemoteIdentity,
metadata: HashMap<String, String>,
},
Expired {
identity: RemoteIdentity,
},
}
impl<TMeta: Metadata> TryFrom<ServiceEventInternal> for ServiceEvent<TMeta> {
type Error = String;
fn try_from(value: ServiceEventInternal) -> Result<Self, Self::Error> {
Ok(match value {
ServiceEventInternal::Discovered { identity, metadata } => Self::Discovered {
identity,
metadata: TMeta::from_hashmap(&metadata)?,
},
ServiceEventInternal::Expired { identity } => Self::Expired { identity },
})
}
}
pin_project! {
pub struct ServiceSubscription<TMeta> {
name: String,
rx: BroadcastStream<(String, ServiceEventInternal)>,
phantom: PhantomData<TMeta>,
}
}
impl<TMeta: Metadata> Stream for ServiceSubscription<TMeta> {
type Item = Result<ServiceEvent<TMeta>, BroadcastStreamRecvError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
return match self.rx.poll_next_unpin(cx) {
Poll::Ready(Some(Ok((name, event)))) => {
if name != self.name {
continue;
}
match event.try_into() {
Ok(result) => Poll::Ready(Some(Ok(result))),
Err(err) => {
warn!("error decoding into TMeta for service '{name}': {err}");
continue; // TODO: This could *technically* cause starvation. Should this error be thrown outta the stream instead?
}
}
}
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
}
}
}
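
End to end, a `Service` is the unit an application advertises and subscribes through. A usage sketch (assumes a `Metadata` impl such as the core's `PeerMetadata`, a `manager: Arc<Manager>`, a `my_metadata` value, and `StreamExt` in scope):

let service = Service::<PeerMetadata>::new("spacedrive", manager.clone())?;
service.update(my_metadata); // the first real advertisement happens here
let mut events = service.listen();
while let Some(Ok(event)) = events.next().await {
    match event {
        ServiceEvent::Discovered { identity, metadata } => {
            // e.g. service.connect(manager.clone(), &identity).await
            let _ = (identity, metadata);
        }
        ServiceEvent::Expired { identity } => {
            let _ = identity; // mark the peer unavailable in app state
        }
    }
}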

View file

@ -2,59 +2,50 @@ use std::{net::SocketAddr, sync::Arc};
use crate::{
spacetime::{BroadcastStream, UnicastStream},
ConnectedPeer, DiscoveredPeer, Manager, Metadata,
spacetunnel::RemoteIdentity,
ConnectedPeer, Manager,
};
use super::PeerId;
/// Represents an event coming from the network manager.
/// This is useful for updating your UI when stuff changes on the backend.
/// You can also interact with some events to cause an event.
#[derive(Debug)]
pub enum Event<TMetadata: Metadata> {
pub enum Event {
/// add a network interface on this node to listen for
AddListenAddr(SocketAddr),
/// remove a network interface from this node so that we don't listen to it
RemoveListenAddr(SocketAddr),
/// discovered peer on your local network
PeerDiscovered(DiscoveredPeer<TMetadata>),
/// a discovered peer has disappeared from the network
PeerExpired {
id: PeerId,
// Will be none if we receive the expire event without having ever seen a discover event.
metadata: Option<TMetadata>,
},
/// communication was established with a peer.
/// There could actually be multiple connections under the hood, but we smooth that over in this API.
PeerConnected(ConnectedPeer),
/// communication was lost with a peer.
PeerDisconnected(PeerId),
PeerDisconnected(RemoteIdentity),
/// the peer has opened a new unicast substream
PeerMessage(PeerMessageEvent<TMetadata, UnicastStream>),
PeerMessage(PeerMessageEvent<UnicastStream>),
/// the peer has opened a new broadcast substream
PeerBroadcast(PeerMessageEvent<TMetadata, BroadcastStream>),
PeerBroadcast(PeerMessageEvent<BroadcastStream>),
/// the node is shutting down
Shutdown,
}
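
A hedged sketch of how an application might dispatch on these events after the refactor (identities instead of `PeerId`s); the handler bodies are illustrative:

// Illustrative dispatcher for the de-generified `Event` type.
fn handle_event(event: Event) {
    match event {
        Event::PeerConnected(peer) => println!("connected: {}", peer.identity),
        Event::PeerDisconnected(identity) => println!("disconnected: {identity}"),
        Event::PeerMessage(_msg) => {
            // `_msg.stream` is a `UnicastStream`; read the request from it here.
        }
        Event::Shutdown => println!("p2p shutting down"),
        _ => {}
    }
}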
#[derive(Debug)]
pub struct PeerMessageEvent<TMetadata: Metadata, S> {
pub struct PeerMessageEvent<S> {
pub stream_id: u64,
pub peer_id: PeerId,
pub manager: Arc<Manager<TMetadata>>,
pub identity: RemoteIdentity,
pub manager: Arc<Manager>,
pub stream: S,
// Prevent manual creation by end-user
pub(crate) _priv: (),
}
impl<TMetadata: Metadata> From<PeerMessageEvent<TMetadata, UnicastStream>> for Event<TMetadata> {
fn from(event: PeerMessageEvent<TMetadata, UnicastStream>) -> Self {
impl From<PeerMessageEvent<UnicastStream>> for Event {
fn from(event: PeerMessageEvent<UnicastStream>) -> Self {
Self::PeerMessage(event)
}
}
impl<TMetadata: Metadata> From<PeerMessageEvent<TMetadata, BroadcastStream>> for Event<TMetadata> {
fn from(event: PeerMessageEvent<TMetadata, BroadcastStream>) -> Self {
impl From<PeerMessageEvent<BroadcastStream>> for Event {
fn from(event: PeerMessageEvent<BroadcastStream>) -> Self {
Self::PeerBroadcast(event)
}
}

View file

@ -17,3 +17,9 @@ pub use manager::*;
pub use manager_stream::*;
pub use peer::*;
pub use utils::*;
// TODO: Remove this
#[doc(hidden)]
pub mod internal {
pub use libp2p::PeerId;
}

View file

@ -1,5 +1,6 @@
use std::{
collections::{HashMap, HashSet},
fmt,
net::SocketAddr,
sync::{
atomic::{AtomicBool, AtomicU64},
@ -8,9 +9,9 @@ use std::{
};
use libp2p::{
core::{muxing::StreamMuxerBox, transport::ListenerId},
core::{muxing::StreamMuxerBox, transport::ListenerId, ConnectedPoint},
swarm::SwarmBuilder,
Transport,
PeerId, Transport,
};
use serde::{Deserialize, Serialize};
use specta::Type;
@ -20,8 +21,9 @@ use tracing::{error, warn};
use crate::{
spacetime::{SpaceTime, UnicastStream},
DiscoveredPeer, Keypair, ManagerStream, ManagerStreamAction, ManagerStreamAction2, Mdns,
MdnsState, Metadata, MetadataManager, PeerId,
spacetunnel::{Identity, RemoteIdentity},
DiscoveryManager, DiscoveryManagerState, Keypair, ManagerStream, ManagerStreamAction,
ManagerStreamAction2,
};
// State of the manager that may infrequently change
@ -31,50 +33,62 @@ pub(crate) struct DynamicManagerState {
pub(crate) config: ManagerConfig,
pub(crate) ipv4_listener_id: Option<ListenerId>,
pub(crate) ipv6_listener_id: Option<ListenerId>,
// A map of connected clients.
// This includes both inbound and outbound connections!
pub(crate) connected: HashMap<libp2p::PeerId, RemoteIdentity>,
// TODO: Removing this would be nice. It's a hack to keep things working after removing `PeerId` from the public API.
pub(crate) connections: HashMap<libp2p::PeerId, (ConnectedPoint, usize)>,
}
/// The core component of the P2P system; holds the state and delegates actions to the other components.
#[derive(Debug)]
pub struct Manager<TMetadata: Metadata> {
pub(crate) mdns_state: Arc<MdnsState<TMetadata>>,
pub struct Manager {
pub(crate) peer_id: PeerId,
pub(crate) identity: Identity,
pub(crate) application_name: String,
pub(crate) stream_id: AtomicU64,
pub(crate) state: RwLock<DynamicManagerState>,
pub(crate) discovery_state: Arc<RwLock<DiscoveryManagerState>>,
event_stream_tx: mpsc::Sender<ManagerStreamAction>,
event_stream_tx2: mpsc::Sender<ManagerStreamAction2<TMetadata>>,
event_stream_tx2: mpsc::Sender<ManagerStreamAction2>,
}
impl<TMetadata: Metadata> Manager<TMetadata> {
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Debug").finish()
}
}
impl Manager {
/// Create a new P2P manager. Please do your best to make the callback closures as fast as possible because they will slow down the P2P event loop!
pub async fn new(
application_name: &'static str,
keypair: &Keypair,
config: ManagerConfig,
metadata_manager: Arc<MetadataManager<TMetadata>>,
) -> Result<(Arc<Self>, ManagerStream<TMetadata>), ManagerError> {
) -> Result<(Arc<Self>, ManagerStream), ManagerError> {
application_name
.chars()
.all(|c| char::is_alphanumeric(c) || c == '-')
.then_some(())
.ok_or(ManagerError::InvalidAppName)?;
let peer_id = PeerId(keypair.raw_peer_id());
let peer_id = keypair.peer_id();
let (event_stream_tx, event_stream_rx) = mpsc::channel(128);
let (event_stream_tx2, event_stream_rx2) = mpsc::channel(128);
let (mdns, mdns_state) = Mdns::new(application_name, peer_id, metadata_manager)
.await
.unwrap();
let config2 = config.clone();
let (discovery_state, service_shutdown_rx) = DiscoveryManagerState::new();
let this = Arc::new(Self {
mdns_state,
application_name: format!("/{}/spacetime/1.0.0", application_name),
identity: keypair.to_identity(),
stream_id: AtomicU64::new(0),
state: RwLock::new(DynamicManagerState {
config,
ipv4_listener_id: None,
ipv6_listener_id: None,
connected: Default::default(),
connections: Default::default(),
}),
discovery_state,
peer_id,
event_stream_tx,
event_stream_tx2,
@ -87,7 +101,7 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
.boxed(),
SpaceTime::new(this.clone()),
keypair.raw_peer_id(),
keypair.peer_id(),
)
.build();
@ -99,11 +113,18 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
Ok((
this.clone(),
ManagerStream {
discovery_manager: DiscoveryManager::new(
application_name,
this.identity.to_remote_identity(),
this.peer_id,
&config2,
this.discovery_state.clone(),
service_shutdown_rx,
)?,
manager: this,
event_stream_rx,
event_stream_rx2,
swarm,
mdns,
queued_events: Default::default(),
shutdown: AtomicBool::new(false),
on_establish_streams: HashMap::new(),
@ -118,29 +139,19 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
}
}
pub fn peer_id(&self) -> PeerId {
self.peer_id
pub fn identity(&self) -> RemoteIdentity {
self.identity.to_remote_identity()
}
pub async fn listen_addrs(&self) -> HashSet<SocketAddr> {
self.mdns_state.listen_addrs.read().await.clone()
pub fn libp2p_peer_id(&self) -> PeerId {
self.peer_id
}
pub async fn update_config(&self, config: ManagerConfig) {
self.emit(ManagerStreamAction::UpdateConfig(config)).await;
}
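
Since disabling mDNS is the headline of this PR, a hedged sketch of toggling it at runtime through this method; the field values are illustrative and `ManagerConfig` is assumed to carry only `enabled` and `port`, as in the generated bindings later in this diff:

// Toggle discovery at runtime. The `UpdateConfig` handler in
// `manager_stream.rs` (later in this diff) shuts down or recreates the
// mDNS service based on `enabled`.
async fn set_discovery_enabled(manager: &Manager, enabled: bool) {
    manager
        .update_config(ManagerConfig {
            enabled,
            port: None, // `None` lets the OS pick a port (illustrative)
        })
        .await;
}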
pub async fn get_discovered_peers(&self) -> Vec<DiscoveredPeer<TMetadata>> {
self.mdns_state
.discovered
.read()
.await
.values()
.cloned()
.collect()
}
pub async fn get_connected_peers(&self) -> Result<Vec<PeerId>, ()> {
pub async fn get_connected_peers(&self) -> Result<Vec<RemoteIdentity>, ()> {
let (tx, rx) = oneshot::channel();
self.emit(ManagerStreamAction::GetConnectedPeers(tx)).await;
rx.await.map_err(|_| {
@ -148,10 +159,32 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
})
}
// TODO: Maybe remove this?
pub async fn stream(&self, identity: RemoteIdentity) -> Result<UnicastStream, ()> {
let peer_id = {
let state = self
.discovery_state
.read()
.unwrap_or_else(PoisonError::into_inner);
// TODO: This should not depend on a `Service` existing. Either we should store discovered peers separately for this, or we should remove this method (preferred).
state
.discovered
.iter()
.find_map(|(_, i)| i.iter().find(|(i, _)| **i == identity))
.ok_or(())?
.1
.peer_id
};
self.stream_inner(peer_id).await
}
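
A short usage sketch of this identity-keyed variant; error handling stays as coarse as the method's own `Result<_, ()>`:

use tokio::io::AsyncWriteExt;

// Open a unicast stream to a peer previously seen by discovery and send a
// few bytes. The identity -> PeerId lookup happens inside `stream`.
async fn say_hello(manager: &Manager, identity: RemoteIdentity) -> Result<(), ()> {
    let mut stream = manager.stream(identity).await?;
    stream.write_all(b"hello").await.map_err(|_| ())?;
    stream.close().await.map_err(|_| ())
}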
// TODO: Should this be private now that connections can be made through the `Service`?
// TODO: Does this need any timeouts added, because hanging forever is bad?
// Be aware this method is `!Sync`, so it can't be used from rspc. // TODO: Can this limitation be removed?
#[allow(clippy::unused_unit)] // TODO: Remove this clippy override once error handling is added
pub async fn stream(&self, peer_id: PeerId) -> Result<UnicastStream, ()> {
pub(crate) async fn stream_inner(&self, peer_id: PeerId) -> Result<UnicastStream, ()> {
// TODO: With this system you can send to any random peer id. Can I reduce that by requiring `.connect(peer_id).unwrap().send(data)` or something like that.
let (tx, rx) = oneshot::channel();
match self
@ -162,28 +195,83 @@ impl<TMetadata: Metadata> Manager<TMetadata> {
Ok(_) => {}
Err(err) => warn!("error emitting event: {}", err),
}
let mut stream = rx.await.map_err(|_| {
let stream = rx.await.map_err(|_| {
warn!("failed to queue establishing stream to peer '{peer_id}'!");
()
})?;
stream.write_discriminator().await.unwrap(); // TODO: Error handling
Ok(stream)
Ok(stream.build(self, peer_id).await)
}
pub async fn broadcast(&self, data: Vec<u8>) {
self.emit(ManagerStreamAction::BroadcastData(data)).await;
}
// TODO: Clean up return type and this API in general
#[allow(clippy::type_complexity)]
pub fn get_debug_state(
&self,
) -> (
PeerId,
RemoteIdentity,
ManagerConfig,
HashMap<PeerId, RemoteIdentity>,
HashSet<PeerId>,
HashMap<String, Option<HashMap<String, String>>>,
HashMap<
String,
HashMap<RemoteIdentity, (PeerId, HashMap<String, String>, Vec<SocketAddr>)>,
>,
HashMap<String, HashSet<RemoteIdentity>>,
) {
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
let discovery_state = self
.discovery_state
.read()
.unwrap_or_else(PoisonError::into_inner);
(
self.peer_id,
self.identity.to_remote_identity(),
state.config.clone(),
state.connected.clone(),
state.connections.keys().copied().collect(),
discovery_state
.services
.iter()
.map(|(k, v)| (k.clone(), v.1.clone()))
.collect(),
discovery_state
.discovered
.iter()
.map(|(k, v)| {
(
k.clone(),
v.clone()
.iter()
.map(|(k, v)| (*k, (v.peer_id, v.meta.clone(), v.addresses.clone())))
.collect::<HashMap<_, _>>(),
)
})
.collect(),
discovery_state.known.clone(),
)
}
pub async fn shutdown(&self) {
let (tx, rx) = oneshot::channel();
self.event_stream_tx
if self
.event_stream_tx
.send(ManagerStreamAction::Shutdown(tx))
.await
.unwrap();
rx.await.unwrap_or_else(|_| {
warn!("Error receiving shutdown signal to P2P Manager!");
}); // Await shutdown so we don't kill the app before the Mdns broadcast
.is_ok()
{
rx.await.unwrap_or_else(|_| {
warn!("Error receiving shutdown signal to P2P Manager!");
}); // Await shutdown so we don't kill the app before the Mdns broadcast
} else {
warn!("p2p was already shutdown, skipping...");
}
}
}

View file

@ -1,5 +1,5 @@
use std::{
collections::{HashMap, VecDeque},
collections::{HashMap, HashSet, VecDeque},
fmt,
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
sync::{
@ -14,15 +14,16 @@ use libp2p::{
dial_opts::{DialOpts, PeerCondition},
NotifyHandler, SwarmEvent, ToSwarm,
},
Swarm,
PeerId, Swarm,
};
use tokio::sync::{mpsc, oneshot};
use tracing::{debug, error, info, trace, warn};
use crate::{
quic_multiaddr_to_socketaddr, socketaddr_to_quic_multiaddr,
spacetime::{OutboundRequest, SpaceTime, UnicastStream},
DynamicManagerState, Event, Manager, ManagerConfig, Mdns, Metadata, PeerId,
spacetime::{OutboundRequest, SpaceTime, UnicastStreamBuilder},
spacetunnel::RemoteIdentity,
DiscoveryManager, DynamicManagerState, Event, Manager, ManagerConfig, Mdns,
};
/// TODO
@ -30,7 +31,7 @@ use crate::{
/// This is `Sync` so it can be used from within rspc.
pub enum ManagerStreamAction {
/// TODO
GetConnectedPeers(oneshot::Sender<Vec<PeerId>>),
GetConnectedPeers(oneshot::Sender<Vec<RemoteIdentity>>),
/// Tell the [`libp2p::Swarm`](libp2p::Swarm) to establish a new connection to a peer.
Dial {
peer_id: PeerId,
@ -47,11 +48,13 @@ pub enum ManagerStreamAction {
/// TODO: Get rid of this and merge into `ManagerStreamAction` without breaking rspc procedures
///
/// This is `!Sync` so can't be used from within rspc.
pub enum ManagerStreamAction2<TMetadata: Metadata> {
pub enum ManagerStreamAction2 {
/// Events are returned to the application via the `ManagerStream::next` method.
Event(Event<TMetadata>),
Event(Event),
/// Events are returned to the application via the `ManagerStream::next` method.
Events(Vec<Event>),
/// TODO
StartStream(PeerId, oneshot::Sender<UnicastStream>),
StartStream(PeerId, oneshot::Sender<UnicastStreamBuilder>),
}
impl fmt::Debug for ManagerStreamAction {
@ -60,37 +63,35 @@ impl fmt::Debug for ManagerStreamAction {
}
}
impl<TMetadata: Metadata> fmt::Debug for ManagerStreamAction2<TMetadata> {
impl fmt::Debug for ManagerStreamAction2 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ManagerStreamAction2")
}
}
impl<TMetadata: Metadata> From<Event<TMetadata>> for ManagerStreamAction2<TMetadata> {
fn from(event: Event<TMetadata>) -> Self {
impl From<Event> for ManagerStreamAction2 {
fn from(event: Event) -> Self {
Self::Event(event)
}
}
/// TODO
pub struct ManagerStream<TMetadata: Metadata> {
pub(crate) manager: Arc<Manager<TMetadata>>,
#[must_use = "streams do nothing unless polled"]
pub struct ManagerStream {
pub(crate) manager: Arc<Manager>,
pub(crate) event_stream_rx: mpsc::Receiver<ManagerStreamAction>,
pub(crate) event_stream_rx2: mpsc::Receiver<ManagerStreamAction2<TMetadata>>,
pub(crate) swarm: Swarm<SpaceTime<TMetadata>>,
pub(crate) mdns: Mdns<TMetadata>,
pub(crate) queued_events: VecDeque<Event<TMetadata>>,
pub(crate) event_stream_rx2: mpsc::Receiver<ManagerStreamAction2>,
pub(crate) swarm: Swarm<SpaceTime>,
pub(crate) discovery_manager: DiscoveryManager,
pub(crate) queued_events: VecDeque<Event>,
pub(crate) shutdown: AtomicBool,
pub(crate) on_establish_streams: HashMap<libp2p::PeerId, Vec<OutboundRequest>>,
}
impl<TMetadata: Metadata> ManagerStream<TMetadata> {
impl ManagerStream {
/// Setup the libp2p listeners based on the manager config.
/// This method will take care of removing old listeners if needed
pub(crate) fn refresh_listeners(
swarm: &mut Swarm<SpaceTime<TMetadata>>,
state: &mut DynamicManagerState,
) {
pub(crate) fn refresh_listeners(swarm: &mut Swarm<SpaceTime>, state: &mut DynamicManagerState) {
if state.config.enabled {
let port = state.config.port.unwrap_or(0);
@ -133,31 +134,30 @@ impl<TMetadata: Metadata> ManagerStream<TMetadata> {
}
}
enum EitherManagerStreamAction<TMetadata: Metadata> {
enum EitherManagerStreamAction {
A(ManagerStreamAction),
B(ManagerStreamAction2<TMetadata>),
B(ManagerStreamAction2),
}
impl<TMetadata: Metadata> From<ManagerStreamAction> for EitherManagerStreamAction<TMetadata> {
impl From<ManagerStreamAction> for EitherManagerStreamAction {
fn from(event: ManagerStreamAction) -> Self {
Self::A(event)
}
}
impl<TMetadata: Metadata> From<ManagerStreamAction2<TMetadata>>
for EitherManagerStreamAction<TMetadata>
{
fn from(event: ManagerStreamAction2<TMetadata>) -> Self {
impl From<ManagerStreamAction2> for EitherManagerStreamAction {
fn from(event: ManagerStreamAction2) -> Self {
Self::B(event)
}
}
impl<TMetadata> ManagerStream<TMetadata>
where
TMetadata: Metadata,
{
impl ManagerStream {
pub fn listen_addrs(&self) -> HashSet<SocketAddr> {
self.discovery_manager.listen_addrs.clone()
}
// Your application should keep polling this until `None` is received, or the P2P system will halt.
pub async fn next(&mut self) -> Option<Event<TMetadata>> {
pub async fn next(&mut self) -> Option<Event> {
// We loop polling internal services until an event comes in that needs to be sent to the parent application.
loop {
if self.shutdown.load(Ordering::Relaxed) {
@ -167,12 +167,8 @@ where
if let Some(event) = self.queued_events.pop_front() {
return Some(event);
}
tokio::select! {
event = self.mdns.poll(&self.manager) => {
if let Some(event) = event {
return Some(event);
}
_ = self.discovery_manager.poll() => {
continue;
},
event = self.event_stream_rx.recv() => {
@ -212,7 +208,17 @@ where
}
}
},
SwarmEvent::ConnectionClosed { .. } => {},
SwarmEvent::ConnectionClosed { peer_id, num_established, .. } => {
if num_established == 0 {
let mut state = self.manager.state.write()
.unwrap_or_else(PoisonError::into_inner);
// Use `|` (not `||`) so both maps are always cleaned up, even if the first removal fails.
if state.connected.remove(&peer_id).is_none()
| state.connections.remove(&peer_id).is_none()
{
warn!("unable to remove a disconnected peer from the connected maps. This indicates a bug!");
}
}
},
SwarmEvent::IncomingConnection { local_addr, .. } => debug!("incoming connection from '{}'", local_addr),
SwarmEvent::IncomingConnectionError { local_addr, error, .. } => warn!("handshake error with incoming connection from '{}': {}", local_addr, error),
SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => warn!("error establishing connection with '{:?}': {}", peer_id, error),
@ -220,7 +226,8 @@ where
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address added: {}", addr);
self.mdns.register_addr(addr).await;
self.discovery_manager.listen_addrs.insert(addr);
self.discovery_manager.do_advertisement();
return Some(Event::AddListenAddr(addr));
},
Err(err) => {
@ -233,7 +240,8 @@ where
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address expired: {}", addr);
self.mdns.unregister_addr(&addr).await;
self.discovery_manager.listen_addrs.remove(&addr);
self.discovery_manager.do_advertisement();
return Some(Event::RemoveListenAddr(addr));
},
Err(err) => {
@ -248,8 +256,7 @@ where
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address closed: {}", addr);
self.mdns.unregister_addr(&addr).await;
self.discovery_manager.listen_addrs.remove(&addr);
self.queued_events.push_back(Event::RemoveListenAddr(addr));
},
Err(err) => {
@ -271,18 +278,34 @@ where
async fn handle_manager_stream_action(
&mut self,
event: EitherManagerStreamAction<TMetadata>,
) -> Option<Event<TMetadata>> {
event: EitherManagerStreamAction,
) -> Option<Event> {
match event {
EitherManagerStreamAction::A(event) => match event {
ManagerStreamAction::GetConnectedPeers(response) => {
let result = {
let state = self
.manager
.state
.read()
.unwrap_or_else(PoisonError::into_inner);
self.swarm
.connected_peers()
.filter_map(|peer_id| {
let identity = state.connected.get(peer_id);
if identity.is_none() {
warn!("Error converting PeerId({peer_id:?}) into RemoteIdentity. This is likely a bug in P2P.")
}
identity.copied()
})
.collect::<Vec<_>>()
};
response
.send(
self.swarm
.connected_peers()
.map(|v| PeerId(*v))
.collect::<Vec<_>>(),
)
.send(result)
.map_err(|_| {
error!("Error sending response to `GetConnectedPeers` request! Sending was dropped!")
})
@ -290,7 +313,7 @@ where
}
ManagerStreamAction::Dial { peer_id, addresses } => {
match self.swarm.dial(
DialOpts::peer_id(peer_id.0)
DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected)
.addresses(addresses.iter().map(socketaddr_to_quic_multiaddr).collect())
.build(),
@ -324,11 +347,36 @@ where
state.config = config;
ManagerStream::refresh_listeners(&mut self.swarm, &mut state);
drop(state);
if !state.config.enabled {
if let Some(mdns) = self.discovery_manager.mdns.take() {
drop(state);
mdns.shutdown();
}
} else if self.discovery_manager.mdns.is_none() {
match Mdns::new(
self.discovery_manager.application_name,
self.discovery_manager.identity,
self.discovery_manager.peer_id,
) {
Ok(mdns) => {
self.discovery_manager.mdns = Some(mdns);
self.discovery_manager.do_advertisement()
}
Err(err) => {
error!("error starting mDNS service: {err:?}");
self.discovery_manager.mdns = None;
// state.config.enabled = false;
// TODO: Properly reset the UI state because it will be out of sync
}
}
}
// drop(state);
}
ManagerStreamAction::Shutdown(tx) => {
info!("Shutting down P2P Manager...");
self.mdns.shutdown().await;
self.discovery_manager.shutdown();
tx.send(()).unwrap_or_else(|_| {
warn!("Error sending shutdown signal to P2P Manager!");
});
@ -338,21 +386,33 @@ where
},
EitherManagerStreamAction::B(event) => match event {
ManagerStreamAction2::Event(event) => return Some(event),
ManagerStreamAction2::Events(mut events) => {
let first = events.pop();
for event in events {
self.queued_events.push_back(event);
}
return first;
}
ManagerStreamAction2::StartStream(peer_id, tx) => {
if !self.swarm.connected_peers().any(|v| *v == peer_id.0) {
if !self.swarm.connected_peers().any(|v| *v == peer_id) {
let addresses = self
.mdns
.discovery_manager
.state
.discovered
.read()
.await
.get(&peer_id)
.unwrap()
.addresses
.clone();
.unwrap_or_else(PoisonError::into_inner)
.discovered
.iter()
.find_map(|(_, service)| {
service.iter().find_map(|(_, v)| {
(v.peer_id == peer_id).then(|| v.addresses.clone())
})
})
.unwrap(); // TODO: Error handling
match self.swarm.dial(
DialOpts::peer_id(peer_id.0)
DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected)
.addresses(
addresses.iter().map(socketaddr_to_quic_multiaddr).collect(),
@ -367,13 +427,13 @@ where
}
self.on_establish_streams
.entry(peer_id.0)
.entry(peer_id)
.or_default()
.push(OutboundRequest::Unicast(tx));
} else {
self.swarm.behaviour_mut().pending_events.push_back(
ToSwarm::NotifyHandler {
peer_id: peer_id.0,
peer_id,
handler: NotifyHandler::Any,
event: OutboundRequest::Unicast(tx),
},
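
To tie the `ManagerStream` together, a minimal driver sketch assuming a tokio runtime; per the comment on `next` above, the stream must be polled until it yields `None`:

// Spawn a task that drives the P2P event loop for the lifetime of the node.
fn spawn_p2p_loop(mut stream: ManagerStream) {
    tokio::spawn(async move {
        while let Some(event) = stream.next().await {
            // Hand each event off to the application here.
            tracing::debug!("p2p event: {event:?}");
        }
        // `None` means the manager has shut down; stop polling.
    });
}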

View file

@ -1,29 +1,31 @@
use std::{
fmt::{self, Formatter},
net::SocketAddr,
sync::Arc,
};
use crate::{Manager, ManagerStreamAction, Metadata, PeerId};
use libp2p::PeerId;
use crate::{spacetunnel::RemoteIdentity, Metadata};
/// Represents a discovered peer.
/// This is held by [Manager] to keep track of discovered peers
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(feature = "specta", derive(specta::Type))]
pub struct DiscoveredPeer<TMetadata: Metadata> {
#[cfg_attr(any(feature = "serde", feature = "specta"), serde(skip))]
pub(crate) manager: Arc<Manager<TMetadata>>,
/// get the peer id of the discovered peer
pub struct DiscoveredPeer<TMeta: Metadata> {
/// the public key of the discovered peer
pub identity: RemoteIdentity,
/// the libp2p peer id of the discovered peer
#[serde(skip)]
pub peer_id: PeerId,
/// get the metadata of the discovered peer
pub metadata: TMetadata,
pub metadata: TMeta,
/// get the addresses of the discovered peer
pub addresses: Vec<SocketAddr>,
}
// `Manager` impls `Debug`, but using it here causes an infinite loop and stack overflow.
impl<TMetadata: Metadata> fmt::Debug for DiscoveredPeer<TMetadata> {
impl<TMeta: Metadata> fmt::Debug for DiscoveredPeer<TMeta> {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("DiscoveredPeer")
.field("peer_id", &self.peer_id)
@ -33,26 +35,14 @@ impl<TMetadata: Metadata> fmt::Debug for DiscoveredPeer<TMetadata> {
}
}
impl<TMetadata: Metadata> DiscoveredPeer<TMetadata> {
/// dial will queue an event to start a connection with the peer
pub async fn dial(self) {
self.manager
.emit(ManagerStreamAction::Dial {
peer_id: self.peer_id,
addresses: self.addresses,
})
.await;
}
}
/// Represents a connected peer.
/// This is held by [Manager] to keep track of connected peers
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(feature = "specta", derive(specta::Type))]
pub struct ConnectedPeer {
/// get the peer id of the discovered peer
pub peer_id: PeerId,
/// get the identity of the discovered peer
pub identity: RemoteIdentity,
/// Did I open the connection?
pub establisher: bool,
}

View file

@ -1,6 +1,6 @@
use std::{
collections::VecDeque,
sync::Arc,
sync::{Arc, PoisonError},
task::{Context, Poll},
};
@ -14,9 +14,9 @@ use libp2p::{
Multiaddr,
};
use thiserror::Error;
use tracing::debug;
use tracing::{debug, trace};
use crate::{ConnectedPeer, Event, Manager, ManagerStreamAction2, Metadata, PeerId};
use crate::{Event, Manager, ManagerStreamAction2};
use super::SpaceTimeConnection;
@ -32,28 +32,25 @@ pub enum OutboundFailure {}
/// SpaceTime is a [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) that implements the SpaceTime protocol.
/// This protocol sits under the application to abstract many complexities of two-way connections and deals with authentication, chunking, etc.
pub struct SpaceTime<TMetadata: Metadata> {
pub(crate) manager: Arc<Manager<TMetadata>>,
pub struct SpaceTime {
pub(crate) manager: Arc<Manager>,
pub(crate) pending_events:
VecDeque<ToSwarm<<Self as NetworkBehaviour>::ToSwarm, THandlerInEvent<Self>>>,
// For future me's sake, DON'T try and refactor this to use shared state (for the nth time). It doesn't fit into libp2p's synchronous trait and polling model!
// pub(crate) connected_peers: HashMap<PeerId, ConnectedPeer>,
}
impl<TMetadata: Metadata> SpaceTime<TMetadata> {
impl SpaceTime {
/// intialise the fabric of space time
pub fn new(manager: Arc<Manager<TMetadata>>) -> Self {
pub fn new(manager: Arc<Manager>) -> Self {
Self {
manager,
pending_events: VecDeque::new(),
// connected_peers: HashMap::new(),
}
}
}
impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
type ConnectionHandler = SpaceTimeConnection<TMetadata>;
type ToSwarm = ManagerStreamAction2<TMetadata>;
impl NetworkBehaviour for SpaceTime {
type ConnectionHandler = SpaceTimeConnection;
type ToSwarm = ManagerStreamAction2;
fn handle_established_inbound_connection(
&mut self,
@ -62,10 +59,7 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
_local_addr: &Multiaddr,
_remote_addr: &Multiaddr,
) -> Result<THandler<Self>, ConnectionDenied> {
Ok(SpaceTimeConnection::new(
PeerId(peer_id),
self.manager.clone(),
))
Ok(SpaceTimeConnection::new(peer_id, self.manager.clone()))
}
fn handle_pending_outbound_connection(
@ -86,10 +80,7 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
_addr: &Multiaddr,
_role_override: Endpoint,
) -> Result<THandler<Self>, ConnectionDenied> {
Ok(SpaceTimeConnection::new(
PeerId(peer_id),
self.manager.clone(),
))
Ok(SpaceTimeConnection::new(peer_id, self.manager.clone()))
}
fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
@ -104,40 +95,33 @@ impl<TMetadata: Metadata> NetworkBehaviour for SpaceTime<TMetadata> {
ConnectedPoint::Dialer { address, .. } => Some(address.clone()),
ConnectedPoint::Listener { .. } => None,
};
debug!(
"connection established with peer '{}' found at '{:?}'; peer has {} active connections",
trace!(
"connection establishing with peer '{}' found at '{:?}'; peer has {} active connections",
peer_id, address, other_established
);
let peer_id = PeerId(peer_id);
// TODO: Move this block onto into `connection.rs` -> will probs be required for the ConnectionEstablishmentPayload stuff
{
debug!("sending establishment request to peer '{}'", peer_id);
if other_established == 0 {
self.pending_events.push_back(ToSwarm::GenerateEvent(
Event::PeerConnected(ConnectedPeer {
peer_id,
establisher: match endpoint {
ConnectedPoint::Dialer { .. } => true,
ConnectedPoint::Listener { .. } => false,
},
})
.into(),
));
}
}
self.manager
.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.connections
.insert(peer_id, (endpoint.clone(), other_established));
}
FromSwarm::ConnectionClosed(ConnectionClosed {
peer_id,
remaining_established,
..
}) => {
let peer_id = PeerId(peer_id);
if remaining_established == 0 {
debug!("Disconnected from peer '{}'", peer_id);
let mut state = self
.manager
.state
.write()
.unwrap_or_else(PoisonError::into_inner);
state.connections.remove(&peer_id);
self.pending_events.push_back(ToSwarm::GenerateEvent(
Event::PeerDisconnected(peer_id).into(),
Event::PeerDisconnected(state.connected.remove(&peer_id).unwrap()).into(),
));
}
}

View file

@ -1,9 +1,12 @@
use libp2p::swarm::{
handler::{
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound,
KeepAlive,
use libp2p::{
swarm::{
handler::{
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound,
KeepAlive,
},
StreamUpgradeError, SubstreamProtocol,
},
StreamUpgradeError, SubstreamProtocol,
PeerId,
};
use std::{
collections::VecDeque,
@ -14,7 +17,7 @@ use std::{
};
use tracing::error;
use crate::{Manager, ManagerStreamAction2, Metadata, PeerId};
use crate::{Manager, ManagerStreamAction2};
use super::{InboundProtocol, OutboundProtocol, OutboundRequest, EMPTY_QUEUE_SHRINK_THRESHOLD};
@ -22,9 +25,9 @@ use super::{InboundProtocol, OutboundProtocol, OutboundRequest, EMPTY_QUEUE_SHRI
const SUBSTREAM_TIMEOUT: Duration = Duration::from_secs(10); // TODO: Tune value
#[allow(clippy::type_complexity)]
pub struct SpaceTimeConnection<TMetadata: Metadata> {
pub struct SpaceTimeConnection {
peer_id: PeerId,
manager: Arc<Manager<TMetadata>>,
manager: Arc<Manager>,
pending_events: VecDeque<
ConnectionHandlerEvent<
OutboundProtocol,
@ -35,8 +38,8 @@ pub struct SpaceTimeConnection<TMetadata: Metadata> {
>,
}
impl<TMetadata: Metadata> SpaceTimeConnection<TMetadata> {
pub(super) fn new(peer_id: PeerId, manager: Arc<Manager<TMetadata>>) -> Self {
impl SpaceTimeConnection {
pub(super) fn new(peer_id: PeerId, manager: Arc<Manager>) -> Self {
Self {
peer_id,
manager,
@ -45,13 +48,11 @@ impl<TMetadata: Metadata> SpaceTimeConnection<TMetadata> {
}
}
// pub enum Connection
impl<TMetadata: Metadata> ConnectionHandler for SpaceTimeConnection<TMetadata> {
impl ConnectionHandler for SpaceTimeConnection {
type FromBehaviour = OutboundRequest;
type ToBehaviour = ManagerStreamAction2<TMetadata>;
type ToBehaviour = ManagerStreamAction2;
type Error = StreamUpgradeError<io::Error>;
type InboundProtocol = InboundProtocol<TMetadata>;
type InboundProtocol = InboundProtocol;
type OutboundProtocol = OutboundProtocol;
type OutboundOpenInfo = ();
type InboundOpenInfo = ();
@ -75,7 +76,11 @@ impl<TMetadata: Metadata> ConnectionHandler for SpaceTimeConnection<TMetadata> {
self.pending_events
.push_back(ConnectionHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(
OutboundProtocol(self.manager.application_name.clone(), req),
OutboundProtocol {
application_name: self.manager.application_name.clone(),
req,
identity: self.manager.identity.clone(),
},
(),
) // TODO: Maybe use `info` here to pass info about the client?
.with_timeout(SUBSTREAM_TIMEOUT),

View file

@ -1,27 +1,29 @@
use std::{
future::Future,
pin::Pin,
sync::{atomic::Ordering, Arc},
sync::{atomic::Ordering, Arc, PoisonError},
};
use libp2p::{core::UpgradeInfo, InboundUpgrade, Stream};
use libp2p::{
core::{ConnectedPoint, UpgradeInfo},
InboundUpgrade, PeerId, Stream,
};
use tokio::io::AsyncReadExt;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::debug;
use tracing::{debug, warn};
use crate::{
spacetime::{BroadcastStream, UnicastStream},
Manager, ManagerStreamAction2, Metadata, PeerId, PeerMessageEvent,
spacetime::UnicastStream, ConnectedPeer, Event, Manager, ManagerStreamAction2, PeerMessageEvent,
};
use super::SpaceTimeProtocolName;
pub struct InboundProtocol<TMetadata: Metadata> {
pub struct InboundProtocol {
pub(crate) peer_id: PeerId,
pub(crate) manager: Arc<Manager<TMetadata>>,
pub(crate) manager: Arc<Manager>,
}
impl<TMetadata: Metadata> UpgradeInfo for InboundProtocol<TMetadata> {
impl UpgradeInfo for InboundProtocol {
type Info = SpaceTimeProtocolName;
type InfoIter = [Self::Info; 1];
@ -30,8 +32,8 @@ impl<TMetadata: Metadata> UpgradeInfo for InboundProtocol<TMetadata> {
}
}
impl<TMetadata: Metadata> InboundUpgrade<Stream> for InboundProtocol<TMetadata> {
type Output = ManagerStreamAction2<TMetadata>;
impl InboundUpgrade<Stream> for InboundProtocol {
type Output = ManagerStreamAction2;
type Error = ();
type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send + 'static>>;
@ -48,30 +50,71 @@ impl<TMetadata: Metadata> InboundUpgrade<Stream> for InboundProtocol<TMetadata>
match discriminator {
crate::spacetime::BROADCAST_DISCRIMINATOR => {
debug!("stream({}, {id}): broadcast stream accepted", self.peer_id);
Ok(ManagerStreamAction2::Event(
PeerMessageEvent {
stream_id: id,
peer_id: self.peer_id,
manager: self.manager.clone(),
stream: BroadcastStream::new(io),
_priv: (),
}
.into(),
))
// Ok(ManagerStreamAction2::Event(
// PeerMessageEvent {
// stream_id: id,
// identity: self.identity,
// manager: self.manager.clone(),
// stream: BroadcastStream::new(io),
// _priv: (),
// }
// .into(),
// ))
todo!("Broadcast's are cringe!");
}
crate::spacetime::UNICAST_DISCRIMINATOR => {
debug!("stream({}, {id}): unicast stream accepted", self.peer_id);
Ok(ManagerStreamAction2::Event(
PeerMessageEvent {
stream_id: id,
peer_id: self.peer_id,
manager: self.manager.clone(),
stream: UnicastStream::new(io),
_priv: (),
let stream =
UnicastStream::new_inbound(self.manager.identity.clone(), io).await;
let establisher = {
let mut state = self
.manager
.state
.write()
.unwrap_or_else(PoisonError::into_inner);
state
.connected
.insert(self.peer_id, stream.remote_identity());
match state.connections.get(&self.peer_id) {
Some((endpoint, 0)) => Some(match endpoint {
ConnectedPoint::Dialer { .. } => true,
ConnectedPoint::Listener { .. } => false,
}),
None => {
warn!("Error getting PeerId({})'s connection state. This indicates a bug in P2P", self.peer_id);
None
}
_ => None,
}
.into(),
))
};
debug!(
"sending establishment request to peer '{}'",
stream.remote_identity()
);
let identity = stream.remote_identity();
let mut events = vec![PeerMessageEvent {
stream_id: id,
identity,
manager: self.manager.clone(),
stream,
_priv: (),
}
.into()];
if let Some(establisher) = establisher {
events.push(Event::PeerConnected(ConnectedPeer {
identity,
establisher,
}));
}
Ok(ManagerStreamAction2::Events(events))
}
_ => todo!(), // TODO: Error handling
}

View file

@ -12,22 +12,28 @@ use tokio::sync::oneshot;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::error;
use super::{SpaceTimeProtocolName, UnicastStream, BROADCAST_DISCRIMINATOR};
use crate::spacetunnel::Identity;
use super::{SpaceTimeProtocolName, UnicastStreamBuilder, BROADCAST_DISCRIMINATOR};
#[derive(Debug)]
pub enum OutboundRequest {
Broadcast(Vec<u8>),
Unicast(oneshot::Sender<UnicastStream>),
Unicast(oneshot::Sender<UnicastStreamBuilder>),
}
pub struct OutboundProtocol(pub(crate) String, pub(crate) OutboundRequest);
pub struct OutboundProtocol {
pub(crate) application_name: String,
pub(crate) req: OutboundRequest,
pub(crate) identity: Identity,
}
impl UpgradeInfo for OutboundProtocol {
type Info = SpaceTimeProtocolName;
type InfoIter = [Self::Info; 1];
fn protocol_info(&self) -> Self::InfoIter {
[SpaceTimeProtocolName(self.0.clone())]
[SpaceTimeProtocolName(self.application_name.clone())]
}
}
@ -37,7 +43,7 @@ impl OutboundUpgrade<Stream> for OutboundProtocol {
type Future = Ready<Result<(), ()>>;
fn upgrade_outbound(self, mut io: Stream, _protocol: Self::Info) -> Self::Future {
match self.1 {
match self.req {
OutboundRequest::Broadcast(data) => {
tokio::spawn(async move {
io.write_all(&[BROADCAST_DISCRIMINATOR]).await.unwrap();
@ -62,7 +68,12 @@ impl OutboundUpgrade<Stream> for OutboundProtocol {
}
OutboundRequest::Unicast(sender) => {
// We write the discriminator to the stream in the `Manager::stream` method before returning the stream to the user to make async a tad nicer.
sender.send(UnicastStream::new(io.compat())).unwrap();
sender
.send(UnicastStreamBuilder::new(
self.identity.clone(),
io.compat(),
))
.unwrap();
}
}

View file

@ -1,22 +1,33 @@
use std::{
io::{self, ErrorKind},
pin::Pin,
sync::PoisonError,
task::{Context, Poll},
};
use libp2p::{futures::AsyncWriteExt, Stream};
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt as TokioAsyncWriteExt, ReadBuf};
use libp2p::{futures::AsyncWriteExt, PeerId, Stream};
use tokio::io::{
AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt as TokioAsyncWriteExt, ReadBuf,
};
use tokio_util::compat::Compat;
use crate::{
spacetunnel::{Identity, RemoteIdentity, REMOTE_IDENTITY_LEN},
Manager,
};
pub const BROADCAST_DISCRIMINATOR: u8 = 0;
pub const UNICAST_DISCRIMINATOR: u8 = 1;
pub const CHALLENGE_LENGTH: usize = 32;
/// A broadcast is a message sent to many peers in the network.
/// Due to this it is not possible to respond to a broadcast.
#[derive(Debug)]
pub struct BroadcastStream(Option<Compat<Stream>>);
impl BroadcastStream {
#[allow(unused)]
pub(crate) fn new(stream: Compat<Stream>) -> Self {
Self(Some(stream))
}
@ -56,22 +67,79 @@ impl Drop for BroadcastStream {
/// A unicast stream is a direct stream to a specific peer.
#[derive(Debug)]
pub struct UnicastStream(Compat<Stream>);
#[allow(unused)] // TODO: Remove this lint override
pub struct UnicastStream {
io: Compat<Stream>,
me: Identity,
remote: RemoteIdentity,
}
// TODO: Utils for sending msgpack etc. over the stream. Cap read buffer sizes so we are less susceptible to DoS attacks.
impl UnicastStream {
pub(crate) fn new(io: Compat<Stream>) -> Self {
Self(io)
pub(crate) async fn new_inbound(identity: Identity, mut io: Compat<Stream>) -> Self {
// TODO: Finish this
// let mut challenge = [0u8; CHALLENGE_LENGTH];
// io.read_exact(&mut challenge).await.unwrap(); // TODO: Timeout
// let nonce = ChaCha20Poly1305::generate_nonce(&mut OsRng); // 96-bits; unique per message
// let ciphertext = cipher.encrypt(&nonce, b"plaintext message".as_ref())?;
// let plaintext = cipher.decrypt(&nonce, ciphertext.as_ref())?;
// TODO: THIS IS INSECURE!!!!!
// We are just sending strings of the public key without any verification that the other party holds the private key.
let mut actual = [0; REMOTE_IDENTITY_LEN];
io.read_exact(&mut actual).await.unwrap(); // TODO: Error handling + timeout
let remote = RemoteIdentity::from_bytes(&actual).unwrap(); // TODO: Error handling
io.write_all(&identity.to_remote_identity().get_bytes())
.await
.unwrap(); // TODO: Error handling + timeout
// TODO: Do we have something to compare against? I don't think so; this is fine.
// if expected.get_bytes() != actual {
// panic!("Mismatch in remote identity!");
// }
Self {
io,
me: identity,
remote,
}
}
pub(crate) async fn write_discriminator(&mut self) -> io::Result<()> {
// TODO: Timeout if the peer doesn't accept the byte quick enough
self.0.write_all(&[UNICAST_DISCRIMINATOR]).await
pub(crate) async fn new_outbound(identity: Identity, mut io: Compat<Stream>) -> Self {
// TODO: Use SPAKE, not some hand-rolled insecure mess
// let challenge = rand::thread_rng().gen::<[u8; CHALLENGE_LENGTH]>();
// self.0.write_all(&challenge).await?;
// TODO: THIS IS INSECURE!!!!!
// We are just sending strings of the public key without any verification that the other party holds the private key.
io.write_all(&identity.to_remote_identity().get_bytes())
.await
.unwrap(); // TODO: Timeout
let mut actual = [0; REMOTE_IDENTITY_LEN];
io.read_exact(&mut actual).await.unwrap(); // TODO: Timeout
let remote = RemoteIdentity::from_bytes(&actual).unwrap(); // TODO: Error handling
// TODO: Do we have something to compare against? I don't think so; this is fine.
// if expected.get_bytes() != actual {
// panic!("Mismatch in remote identity!");
// }
Self {
io,
me: identity,
remote,
}
}
pub fn remote_identity(&self) -> RemoteIdentity {
self.remote
}
pub async fn close(self) -> Result<(), io::Error> {
self.0.into_inner().close().await
self.io.into_inner().close().await
}
}
@ -81,7 +149,7 @@ impl AsyncRead for UnicastStream {
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().0).poll_read(cx, buf)
Pin::new(&mut self.get_mut().io).poll_read(cx, buf)
}
}
@ -91,14 +159,42 @@ impl AsyncWrite for UnicastStream {
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.get_mut().0).poll_write(cx, buf)
Pin::new(&mut self.get_mut().io).poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().0).poll_flush(cx)
Pin::new(&mut self.get_mut().io).poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().0).poll_shutdown(cx)
Pin::new(&mut self.get_mut().io).poll_shutdown(cx)
}
}
#[derive(Debug)]
pub struct UnicastStreamBuilder {
identity: Identity,
io: Compat<Stream>,
}
impl UnicastStreamBuilder {
pub(crate) fn new(identity: Identity, io: Compat<Stream>) -> Self {
Self { identity, io }
}
pub(crate) async fn build(mut self, manager: &Manager, peer_id: PeerId) -> UnicastStream {
// TODO: Time out if the peer doesn't accept the byte quickly enough
self.io.write_all(&[UNICAST_DISCRIMINATOR]).await.unwrap();
let stream = UnicastStream::new_outbound(self.identity, self.io).await;
manager
.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.connected
.insert(peer_id, stream.remote_identity());
stream
}
}

View file

@ -1,11 +1,17 @@
use std::hash::{Hash, Hasher};
use std::{
hash::{Hash, Hasher},
str::FromStr,
};
use base64::{engine::general_purpose, Engine};
use ed25519_dalek::{VerifyingKey, SECRET_KEY_LENGTH};
use rand_core::OsRng;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use specta::Type;
use thiserror::Error;
pub const REMOTE_IDENTITY_LEN: usize = 32;
#[derive(Debug, Error)]
#[error(transparent)]
pub enum IdentityErr {
@ -14,9 +20,10 @@ pub enum IdentityErr {
#[error("Invalid key length")]
InvalidKeyLength,
}
/// TODO
#[derive(Debug)]
pub struct Identity(ed25519_dalek::SigningKey);
#[derive(Debug, Clone)]
pub struct Identity(ed25519_dalek::SigningKey); // TODO: Zeroize on this type
impl PartialEq for Identity {
fn eq(&self, other: &Self) -> bool {
@ -51,7 +58,8 @@ impl Identity {
RemoteIdentity(self.0.verifying_key())
}
}
#[derive(Clone, PartialEq, Eq)]
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct RemoteIdentity(ed25519_dalek::VerifyingKey);
impl Hash for RemoteIdentity {
@ -63,14 +71,70 @@ impl Hash for RemoteIdentity {
impl std::fmt::Debug for RemoteIdentity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("RemoteIdentity")
.field(&hex::encode(self.0.as_bytes()))
.field(&general_purpose::STANDARD_NO_PAD.encode(self.0.as_bytes()))
.finish()
}
}
impl std::fmt::Display for RemoteIdentity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&general_purpose::STANDARD_NO_PAD.encode(self.0.as_bytes()))
}
}
impl Serialize for RemoteIdentity {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
serializer.serialize_str(&hex::encode(self.0.as_bytes()))
serializer.serialize_str(&general_purpose::STANDARD_NO_PAD.encode(self.0.as_bytes()))
}
}
impl<'de> Deserialize<'de> for RemoteIdentity {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
let bytes = general_purpose::STANDARD_NO_PAD
.decode(s)
.map_err(serde::de::Error::custom)?;
Ok(Self(
ed25519_dalek::VerifyingKey::from_bytes(
// Avoid the panicking slice when fewer than 32 bytes were decoded.
bytes
.as_slice()
.try_into()
.map_err(|_| serde::de::Error::custom("Invalid key length"))?,
)
.map_err(serde::de::Error::custom)?,
))
}
}
impl TryFrom<String> for RemoteIdentity {
type Error = IdentityErr;
fn try_from(value: String) -> Result<Self, Self::Error> {
let bytes = general_purpose::STANDARD_NO_PAD
.decode(value)
.map_err(|_| IdentityErr::InvalidKeyLength)?;
Ok(Self(ed25519_dalek::VerifyingKey::from_bytes(
// Avoid the panicking slice when fewer than 32 bytes were decoded.
bytes
.as_slice()
.try_into()
.map_err(|_| IdentityErr::InvalidKeyLength)?,
)?))
}
}
impl FromStr for RemoteIdentity {
type Err = IdentityErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let bytes = general_purpose::STANDARD_NO_PAD
.decode(s)
.map_err(|_| IdentityErr::InvalidKeyLength)?;
Ok(Self(ed25519_dalek::VerifyingKey::from_bytes(
// Avoid the panicking slice when fewer than 32 bytes were decoded.
bytes
.as_slice()
.try_into()
.map_err(|_| IdentityErr::InvalidKeyLength)?,
)?))
}
}
@ -92,7 +156,7 @@ impl RemoteIdentity {
)?))
}
pub fn to_bytes(&self) -> [u8; 32] {
pub fn get_bytes(&self) -> [u8; REMOTE_IDENTITY_LEN] {
self.0.to_bytes()
}
@ -100,3 +164,9 @@ impl RemoteIdentity {
self.0
}
}
impl From<ed25519_dalek::SigningKey> for Identity {
fn from(value: ed25519_dalek::SigningKey) -> Self {
Self(value)
}
}
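
A small round-trip check of the new base64 (`STANDARD_NO_PAD`) encoding; `Identity::new()` is assumed to exist as the keypair constructor (it is not shown in this diff):

use std::str::FromStr;

fn round_trip() {
    let me = Identity::new(); // hypothetical constructor
    let remote = me.to_remote_identity();

    // `Display`, `Serialize`, `FromStr`, and `Deserialize` all agree on
    // base64 STANDARD_NO_PAD now.
    let encoded = remote.to_string();
    let decoded = RemoteIdentity::from_str(&encoded).expect("round-trip");
    assert_eq!(remote, decoded);
}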

View file

@ -1,6 +1,9 @@
use ed25519_dalek::SigningKey;
use libp2p::identity::ed25519::{self};
use serde::{Deserialize, Serialize};
use crate::spacetunnel::{Identity, RemoteIdentity};
#[derive(Debug, Clone)]
pub struct Keypair(ed25519::Keypair);
@ -9,14 +12,19 @@ impl Keypair {
Self(ed25519::Keypair::generate())
}
pub fn peer_id(&self) -> crate::PeerId {
let pk: libp2p::identity::PublicKey = self.0.public().into();
crate::PeerId(libp2p::PeerId::from_public_key(&pk))
pub fn to_identity(&self) -> Identity {
// This depends on libp2p implementation details, which isn't great
SigningKey::from_keypair_bytes(&self.0.to_bytes())
.unwrap()
.into()
}
// TODO: Maybe try and remove
pub fn raw_peer_id(&self) -> libp2p::PeerId {
pub fn to_remote_identity(&self) -> RemoteIdentity {
self.to_identity().to_remote_identity()
}
// TODO: Make this `pub(crate)`
pub fn peer_id(&self) -> libp2p::PeerId {
let pk: libp2p::identity::PublicKey = self.0.public().into();
libp2p::PeerId::from_public_key(&pk)

View file

@ -1,12 +1,10 @@
use std::{collections::HashMap, fmt::Debug};
use crate::PeerId;
/// This trait must be implemented for the metadata type to allow it to be converted to mDNS DNS records.
pub trait Metadata: Debug + Clone + Send + Sync + 'static {
fn to_hashmap(self) -> HashMap<String, String>;
fn from_hashmap(peer_id: &PeerId, data: &HashMap<String, String>) -> Result<Self, String>
fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized;
}
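
A minimal sketch of implementing the updated trait, now that `from_hashmap` no longer receives a `PeerId`; the `NodeMetadata` type is illustrative:

use std::collections::HashMap;

#[derive(Debug, Clone)]
struct NodeMetadata {
    name: String,
}

impl Metadata for NodeMetadata {
    fn to_hashmap(self) -> HashMap<String, String> {
        HashMap::from([("name".into(), self.name)])
    }

    fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String> {
        Ok(Self {
            name: data
                .get("name")
                .ok_or_else(|| "missing 'name'".to_string())?
                .clone(),
        })
    }
}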

View file

@ -1,9 +1,7 @@
mod keypair;
mod metadata;
mod multiaddr;
mod peer_id;
pub use keypair::*;
pub use metadata::*;
pub(crate) use multiaddr::*;
pub use peer_id::*;

View file

@ -1,35 +0,0 @@
use std::{fmt::Display, str::FromStr};
#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[cfg_attr(feature = "specta", derive(specta::Type))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(any(feature = "specta", feature = "serde"), serde(transparent))]
pub struct PeerId(
#[cfg_attr(any(feature = "specta", feature = "serde"), specta(type = String))]
pub(crate) libp2p::PeerId,
);
impl PeerId {
// pub fn to_string(&self) -> String {
// self.0.to_string()
// }
pub fn random() -> Self {
Self(libp2p::PeerId::random())
}
}
impl FromStr for PeerId {
#[allow(deprecated)]
type Err = libp2p::identity::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(libp2p::PeerId::from_str(s)?))
}
}
impl Display for PeerId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0)
}
}

View file

@ -42,7 +42,7 @@ function IncorrectP2PPairingPane() {
console.log(data);
}
});
const nlmState = useBridgeQuery(['p2p.nlmState'], {
const nlmState = useBridgeQuery(['p2p.state'], {
refetchInterval: 1000
});
const libraries = useBridgeQuery(['library.list']);

View file

@ -216,7 +216,7 @@ function SpacedropDialog(props: UseDialogProps) {
onSubmit={form.handleSubmit((data) =>
doSpacedrop.mutateAsync({
file_path: getSpacedropState().droppedFiles,
peer_id: data.targetPeer! // `submitDisabled` ensures this
identity: data.targetPeer! // `submitDisabled` ensures this
})
)}
submitDisabled={value === undefined}

View file

@ -29,7 +29,7 @@ export type Procedures = {
{ key: "notifications.dismiss", input: NotificationId, result: null } |
{ key: "notifications.dismissAll", input: never, result: null } |
{ key: "notifications.get", input: never, result: Notification[] } |
{ key: "p2p.nlmState", input: never, result: { [key: string]: LibraryData } } |
{ key: "p2p.state", input: never, result: P2PState } |
{ key: "preferences.get", input: LibraryArgs<null>, result: LibraryPreferences } |
{ key: "search.ephemeralPaths", input: LibraryArgs<EphemeralPathSearchArgs>, result: NonIndexedFileSystemEntries } |
{ key: "search.objects", input: LibraryArgs<ObjectSearchArgs>, result: SearchData<ExplorerItem> } |
@ -87,7 +87,7 @@ export type Procedures = {
{ key: "notifications.testLibrary", input: LibraryArgs<null>, result: null } |
{ key: "p2p.acceptSpacedrop", input: [string, string | null], result: null } |
{ key: "p2p.cancelSpacedrop", input: string, result: null } |
{ key: "p2p.pair", input: PeerId, result: number } |
{ key: "p2p.pair", input: string, result: number } |
{ key: "p2p.pairingResponse", input: [number, PairingDecision], result: null } |
{ key: "p2p.spacedrop", input: SpacedropArgs, result: string } |
{ key: "preferences.update", input: LibraryArgs<LibraryPreferences>, result: null } |
@ -235,8 +235,6 @@ export type IndexerRule = { id: number; pub_id: number[]; name: string | null; d
*/
export type IndexerRuleCreateArgs = { name: string; dry_run: boolean; rules: ([RuleKind, string[]])[] }
export type InstanceState = "Unavailable" | { Discovered: PeerId } | { Connected: PeerId }
export type InvalidateOperationEvent = { type: "single"; data: SingleInvalidateOperationEvent } | { type: "all" }
export type JobGroup = { id: string; action: string | null; status: JobStatus; created_at: string; jobs: JobReport[] }
@ -259,8 +257,6 @@ export type LibraryConfig = { name: LibraryName; description: string | null; ins
export type LibraryConfigWrapped = { uuid: string; instance_id: string; instance_public_key: string; config: LibraryConfig }
export type LibraryData = { instances: { [key: string]: InstanceState } }
export type LibraryName = string
export type LibraryPreferences = { location?: { [key: string]: LocationSettings } }
@ -290,6 +286,13 @@ export type LocationUpdateArgs = { id: number; name: string | null; generate_pre
export type LocationWithIndexerRules = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; instance_id: number | null; indexer_rules: { indexer_rule: IndexerRule }[] }
/**
* The configuration for the P2P Manager
* DO NOT MAKE BREAKING CHANGES - This is embedded in the `node_config.json`
* For future me: `Keypair` is not on here because hot reloading it is hard.
*/
export type ManagerConfig = { enabled: boolean; port?: number | null }
export type MaybeNot<T> = T | { not: T }
export type MaybeUndefined<T> = null | null | T
@ -354,7 +357,9 @@ export type Orientation = "Normal" | "CW90" | "CW180" | "CW270" | "MirroredVerti
/**
* TODO: P2P event for the frontend
*/
export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "ExpiredPeer"; peer_id: PeerId } | { type: "ConnectedPeer"; peer_id: PeerId } | { type: "DisconnectedPeer"; peer_id: PeerId } | { type: "SpacedropRequest"; id: string; peer_id: PeerId; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedout"; id: string } | { type: "SpacedropRejected"; id: string } | { type: "PairingRequest"; id: number; name: string; os: OperatingSystem } | { type: "PairingProgress"; id: number; status: PairingStatus }
export type P2PEvent = { type: "DiscoveredPeer"; identity: string; metadata: PeerMetadata } | { type: "ExpiredPeer"; identity: string } | { type: "ConnectedPeer"; identity: string } | { type: "DisconnectedPeer"; identity: string } | { type: "SpacedropRequest"; id: string; identity: string; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedout"; id: string } | { type: "SpacedropRejected"; id: string } | { type: "PairingRequest"; id: number; name: string; os: OperatingSystem } | { type: "PairingProgress"; id: number; status: PairingStatus }
export type P2PState = { node: { [key: string]: PeerStatus }; libraries: ([string, { [key: string]: PeerStatus }])[]; self_peer_id: PeerId; self_identity: string; config: ManagerConfig; manager_connected: { [key: PeerId]: string }; manager_connections: PeerId[]; dicovery_services: { [key: string]: { [key: string]: string } | null }; discovery_discovered: { [key: string]: { [key: string]: [PeerId, { [key: string]: string }, string[]] } }; discovery_known: { [key: string]: string[] } }
export type PairingDecision = { decision: "accept"; libraryId: string } | { decision: "reject" }
@ -362,7 +367,9 @@ export type PairingStatus = { type: "EstablishingConnection" } | { type: "Pairin
export type PeerId = string
export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; version: string | null; email: string | null; img_url: string | null }
export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; version: string | null }
export type PeerStatus = "Unavailable" | "Discovered" | "Connected"
export type PlusCode = string
@ -402,7 +409,7 @@ export type SingleInvalidateOperationEvent = { key: string; arg: any; result: an
export type SortOrder = "Asc" | "Desc"
export type SpacedropArgs = { peer_id: PeerId; file_path: string[] }
export type SpacedropArgs = { identity: string; file_path: string[] }
export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }

View file

@ -33,16 +33,16 @@ export function P2PContextProvider({ children }: PropsWithChildren) {
events.current.dispatchEvent(new CustomEvent('p2p-event', { detail: data }));
if (data.type === 'DiscoveredPeer') {
discoveredPeers.set(data.peer_id, data.metadata);
discoveredPeers.set(data.identity, data.metadata);
setDiscoveredPeer([discoveredPeers]);
} else if (data.type === 'ExpiredPeer') {
discoveredPeers.delete(data.peer_id);
discoveredPeers.delete(data.identity);
setDiscoveredPeer([discoveredPeers]);
} else if (data.type === 'ConnectedPeer') {
connectedPeers.set(data.peer_id, undefined);
connectedPeers.set(data.identity, undefined);
setConnectedPeers([connectedPeers]);
} else if (data.type === 'DisconnectedPeer') {
connectedPeers.delete(data.peer_id);
connectedPeers.delete(data.identity);
setConnectedPeers([connectedPeers]);
} else if (data.type === 'PairingProgress') {
setPairingStatus([pairingStatus.set(data.id, data.status)]);