Fix P2P not working for libraries (#2031)

* P2P Debug route

* Remove legacy peer to peer pairing process

* Fix error typo

* Sync instances with cloud

* Upgrade deps + extended instance data

* Create instance with extended metadata

* Auto sync instances

* Actually `.await`

* bruh

* sync library info

* this isn't gonna work

* only sleep cloud receiver when no more messages (#1985)

* [ENG-1567] Fix renaming (#1986)

fix rename

* only sleep cloud receiver when no more messages

* use in memory instances during cloud receive (#1995)

* use in memory instances during cloud receive

* is_empty

---------

Co-authored-by: nikec <43032218+niikeec@users.noreply.github.com>

* fix type error

* wip

* make mdns mdns better

* rebuild state

* Add hooks + listeners + discovered state

* Split into crates

* wip fixing core + wip merging Spacetime into `sd-p2p2`

* `SmartLockGuard` + `Listener`

* Make `sd-core` compile

* Reenable all operation receivers

* Fix all broken code within `sd-core`

* minor fixes found in review

* Bring in `libp2p` + restructure `sd-p2p` for the gazillion-th time

* whoops

* Compile no matter the (runtime) cost

* fixing merge issues

* wip

* a

* b

* C

* Handle port betterer

* c

* Migrate node config

* a

* no crash on startup

* wip

* a

* jdfhskjfsg

* a

* fix discovery

* a bunch of fixes

* getting Spacedrop working

* I don't get why it no worky

* debug example

* a

* wip

* wip

* removing logging from stream impl

* wip: shit is fucked

* Redo quic integration  + Spacedrop working

* Fix shutdown - deadlocks + shutdown peers

* Add Prisma migrations

* Fix shutdown

* a

* fix

* cleanup

* The lord clippy hath spoken

* disable P2P settings for now

---------

Co-authored-by: Brendan Allan <brendonovich@outlook.com>
Co-authored-by: nikec <43032218+niikeec@users.noreply.github.com>
This commit is contained in:
Oscar Beaumont 2024-02-21 16:13:40 +08:00 committed by GitHub
parent af8dbf7789
commit 519b1b6c46
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
88 changed files with 2916 additions and 3752 deletions

1
.gitignore vendored
View file

@ -82,3 +82,4 @@ spacedrive
.cargo/config.toml
.github/scripts/deps
.vite-inspect
vite.config.ts.*

430
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -57,7 +57,7 @@ base64 = "0.21.5"
blake3 = "1.5.0"
chrono = "0.4.31"
clap = "4.4.7"
futures = "0.3.29"
futures = "0.3.30"
futures-concurrency = "7.4.3"
hex = "0.4.3"
http = "0.2.9"
@ -76,7 +76,7 @@ strum = "0.25"
strum_macros = "0.25"
tempfile = "3.8.1"
thiserror = "1.0.50"
tokio = "1.34.0"
tokio = "1.36.0"
tokio-stream = "0.1.14"
tokio-util = "0.7.10"
uhlc = "=0.5.2"
@ -87,13 +87,19 @@ webp = "0.2.6"
# Proper IOS Support
if-watch = { git = "https://github.com/oscartbeaumont/if-watch.git", rev = "a92c17d3f85c1c6fb0afeeaf6c2b24d0b147e8c3" }
# Beta features
# We hack it to the high heavens
rspc = { git = "https://github.com/spacedriveapp/rspc.git", rev = "f3347e2e8bfe3f37bfacc437ca329fe71cdcb048" }
# `cursor_position` method
tauri = { git = "https://github.com/spacedriveapp/tauri.git", rev = "8409af71a83d631ff9d1cd876c441a57511a1cbd" }
tao = { git = "https://github.com/spacedriveapp/tao", rev = "7880adbc090402c44fbcf006669458fa82623403" }
# Add `Control::open_stream_with_addrs`
libp2p = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" }
libp2p-core = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" }
libp2p-swarm = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" }
libp2p-stream = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" }
# Set the settings for build scripts and proc-macros.
[profile.dev.build-override]
opt-level = 3

View file

@ -37,8 +37,9 @@ const GeneralSettingsScreen = ({ navigation }: SettingsStackScreenProps<'General
{/* Node Name and Port */}
<SettingsTitle style={tw`mb-1`}>Node Name</SettingsTitle>
<Input value={node.name} />
<SettingsTitle style={tw`mb-1 mt-3`}>Node Port</SettingsTitle>
<Input value={node.p2p_port?.toString() ?? '5795'} keyboardType="numeric" />
{/* // TODO: Bring this back */}
{/* <SettingsTitle style={tw`mb-1 mt-3`}>Node Port</SettingsTitle> */}
{/* <Input value={node.p2p_port?.toString() ?? '5795'} keyboardType="numeric" /> */}
</Card>
{debugState.enabled && (
<Card style={tw`mt-4`}>

View file

@ -38,7 +38,10 @@ sd-images = { path = "../crates/images", features = [
"specta",
] }
sd-media-metadata = { path = "../crates/media-metadata" }
sd-p2p = { path = "../crates/p2p", features = ["specta", "serde"] }
sd-p2p2 = { path = "../crates/p2p2", features = ["specta"] }
sd-p2p-block = { path = "../crates/p2p-block" }
sd-p2p-proto = { path = "../crates/p2p-proto" }
sd-p2p-tunnel = { path = "../crates/p2p-tunnel" }
sd-prisma = { path = "../crates/prisma" }
sd-ai = { path = "../crates/ai", optional = true }
sd-sync = { path = "../crates/sync" }

View file

@ -0,0 +1,33 @@
/*
Warnings:
- You are about to drop the column `pub_id` on the `label` table. All the data in the column will be lost.
- You are about to drop the column `node_peer_id` on the `node` table. All the data in the column will be lost.
*/
-- RedefineTables
PRAGMA foreign_keys=OFF;
CREATE TABLE "new_label" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT NOT NULL,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
INSERT INTO "new_label" ("date_created", "date_modified", "id", "name") SELECT "date_created", "date_modified", "id", "name" FROM "label";
DROP TABLE "label";
ALTER TABLE "new_label" RENAME TO "label";
CREATE UNIQUE INDEX "label_name_key" ON "label"("name");
CREATE TABLE "new_node" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pub_id" BLOB NOT NULL,
"name" TEXT NOT NULL,
"platform" INTEGER NOT NULL,
"date_created" DATETIME NOT NULL,
"identity" BLOB
);
INSERT INTO "new_node" ("date_created", "id", "identity", "name", "platform", "pub_id") SELECT "date_created", "id", "identity", "name", "platform", "pub_id" FROM "node";
DROP TABLE "node";
ALTER TABLE "new_node" RENAME TO "node";
CREATE UNIQUE INDEX "node_pub_id_key" ON "node"("pub_id");
PRAGMA foreign_key_check;
PRAGMA foreign_keys=ON;

View file

@ -43,7 +43,6 @@ model Node {
platform Int
date_created DateTime
identity Bytes? // TODO: Change to required field in future
node_peer_id String? // TODO: Remove as part of - https://linear.app/spacedriveapp/issue/ENG-757/p2p-library-portability
@@map("node")
}
@ -72,8 +71,8 @@ model Instance {
locations Location[]
CRDTOperation CRDTOperation[]
CloudCRDTOperation CloudCRDTOperation[]
CRDTOperation CRDTOperation[]
CloudCRDTOperation CloudCRDTOperation[]
@@map("instance")
}
@ -360,11 +359,11 @@ model Label {
model LabelOnObject {
date_created DateTime @default(now())
object_id Int
object Object @relation(fields: [object_id], references: [id], onDelete: Restrict)
object_id Int
object Object @relation(fields: [object_id], references: [id], onDelete: Restrict)
label_id Int
label Label @relation(fields: [label_id], references: [id], onDelete: Restrict)
label_id Int
label Label @relation(fields: [label_id], references: [id], onDelete: Restrict)
@@id([label_id, object_id])
@@map("label_on_object")

View file

@ -9,7 +9,7 @@ use crate::{
use futures::StreamExt;
use sd_cache::{Model, Normalise, NormalisedResult, NormalisedResults};
use sd_file_ext::kind::ObjectKind;
use sd_p2p::spacetunnel::RemoteIdentity;
use sd_p2p2::RemoteIdentity;
use sd_prisma::prisma::{indexer_rule, object, statistics};
use tokio_stream::wrappers::IntervalStream;

View file

@ -2,14 +2,15 @@ use crate::{
invalidate_query,
job::JobProgressEvent,
node::{
config::{NodeConfig, NodePreferences},
config::{NodeConfig, NodePreferences, P2PDiscoveryState, Port},
get_hardware_model_name, HardwareModel,
},
p2p::{into_listener2, Listener2},
Node,
};
use sd_cache::patch_typedef;
use sd_p2p::P2PStatus;
use sd_p2p2::RemoteIdentity;
use std::sync::{atomic::Ordering, Arc};
use itertools::Itertools;
@ -93,8 +94,10 @@ pub struct SanitisedNodeConfig {
pub id: Uuid,
/// name is the display name of the current node. This is set by the user and is shown in the UI. // TODO: Length validation so it can fit in DNS record
pub name: String,
pub p2p_enabled: bool,
pub p2p_port: Option<u16>,
pub identity: RemoteIdentity,
pub p2p_ipv4_port: Port,
pub p2p_ipv6_port: Port,
pub p2p_discovery: P2PDiscoveryState,
pub features: Vec<BackendFeature>,
pub preferences: NodePreferences,
pub image_labeler_version: Option<String>,
@ -105,8 +108,10 @@ impl From<NodeConfig> for SanitisedNodeConfig {
Self {
id: value.id,
name: value.name,
p2p_enabled: value.p2p.enabled,
p2p_port: value.p2p.port,
identity: value.identity.to_remote_identity(),
p2p_ipv4_port: value.p2p_ipv4_port,
p2p_ipv6_port: value.p2p_ipv6_port,
p2p_discovery: value.p2p_discovery,
features: value.features,
preferences: value.preferences,
image_labeler_version: value.image_labeler_version,
@ -119,7 +124,7 @@ struct NodeState {
#[serde(flatten)]
config: SanitisedNodeConfig,
data_path: String,
p2p: P2PStatus,
listeners: Vec<Listener2>,
device_model: Option<String>,
}
@ -155,7 +160,7 @@ pub(crate) fn mount() -> Arc<Router> {
.to_str()
.expect("Found non-UTF-8 path")
.to_string(),
p2p: node.p2p.manager.status(),
listeners: into_listener2(&node.p2p.p2p.listeners()),
device_model: Some(device_model),
})
})

View file

@ -1,4 +1,7 @@
use crate::{invalidate_query, util::MaybeUndefined};
use crate::{
invalidate_query,
node::config::{P2PDiscoveryState, Port},
};
use sd_prisma::prisma::{instance, location};
@ -16,8 +19,9 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
#[derive(Deserialize, Type)]
pub struct ChangeNodeNameArgs {
pub name: Option<String>,
pub p2p_port: MaybeUndefined<u16>,
pub p2p_enabled: Option<bool>,
pub p2p_ipv4_port: Option<Port>,
pub p2p_ipv6_port: Option<Port>,
pub p2p_discovery: Option<P2PDiscoveryState>,
pub image_labeler_version: Option<String>,
}
R.mutation(|node, args: ChangeNodeNameArgs| async move {
@ -30,9 +34,6 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
}
}
let does_p2p_need_refresh =
args.p2p_enabled.is_some() || args.p2p_port.is_defined();
#[cfg(feature = "ai")]
let mut new_model = None;
@ -42,11 +43,15 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
config.name = name;
}
config.p2p.enabled = args.p2p_enabled.unwrap_or(config.p2p.enabled);
if let Some(v) = args.p2p_port.into() {
config.p2p.port = v;
}
if let Some(port) = args.p2p_ipv4_port {
config.p2p_ipv4_port = port;
};
if let Some(port) = args.p2p_ipv6_port {
config.p2p_ipv6_port = port;
};
if let Some(v) = args.p2p_discovery {
config.p2p_discovery = v;
};
#[cfg(feature = "ai")]
if let Some(version) = args.image_labeler_version {
@ -59,9 +64,9 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
new_model = sd_ai::image_labeler::YoloV8::model(Some(&version))
.map_err(|e| {
error!(
"Failed to crate image_detection model: '{}'; Error: {e:#?}",
&version,
);
"Failed to crate image_detection model: '{}'; Error: {e:#?}",
&version,
);
})
.ok();
if new_model.is_some() {
@ -79,13 +84,8 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
)
})?;
// If a P2P config was modified reload it
if does_p2p_need_refresh {
node.p2p
.manager
.update_config(node.config.get().await.p2p.clone())
.await;
}
// This is a no-op if the config didn't change
node.p2p.on_node_config_change().await;
invalidate_query!(node; node, "nodeState");

View file

@ -1,11 +1,12 @@
use crate::p2p::{operations, P2PEvent};
use crate::p2p::{operations, Header, P2PEvent, PeerMetadata};
use sd_p2p::spacetunnel::RemoteIdentity;
use sd_p2p2::RemoteIdentity;
use rspc::{alpha::AlphaRouter, ErrorCode};
use serde::Deserialize;
use specta::Type;
use std::path::PathBuf;
use tokio::io::AsyncWriteExt;
use uuid::Uuid;
use super::{Ctx, R};
@ -14,26 +15,21 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
R.router()
.procedure("events", {
R.subscription(|node, _: ()| async move {
let mut rx = node.p2p.subscribe();
let mut rx = node.p2p.events.subscribe();
let mut queued = Vec::new();
// TODO: Don't block subscription start
for peer in node.p2p.node.get_discovered() {
queued.push(P2PEvent::DiscoveredPeer {
identity: peer.identity,
metadata: peer.metadata,
});
}
// TODO: Don't block subscription start
for identity in node.p2p.manager.get_connected_peers().await.map_err(|_| {
rspc::Error::new(
ErrorCode::InternalServerError,
"todo: error getting connected peers".into(),
)
})? {
queued.push(P2PEvent::ConnectedPeer { identity });
for (identity, peer, metadata) in
node.p2p.p2p.peers().iter().filter_map(|(i, p)| {
PeerMetadata::from_hashmap(&p.metadata())
.ok()
.map(|m| (i, p, m))
}) {
let identity = *identity;
match peer.is_connected() {
true => queued.push(P2PEvent::ConnectedPeer { identity }),
false => queued.push(P2PEvent::DiscoveredPeer { identity, metadata }),
}
}
Ok(async_stream::stream! {
@ -48,10 +44,36 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
})
})
.procedure("state", {
R.query(|node, _: ()| async move {
// TODO: This has a potentially invalid map key and Specta don't like that.
// TODO: This will bypass that check and for an debug route that's fine.
Ok(serde_json::to_value(node.p2p.state()).unwrap())
R.query(|node, _: ()| async move { Ok(node.p2p.state().await) })
})
.procedure("debugConnect", {
R.mutation(|node, identity: RemoteIdentity| async move {
let peer = { node.p2p.p2p.peers().get(&identity).cloned() };
let mut stream = peer
.ok_or(rspc::Error::new(
ErrorCode::InternalServerError,
"big man, offline".into(),
))?
.new_stream()
.await
.map_err(|err| {
rspc::Error::new(
ErrorCode::InternalServerError,
format!("error in peer.new_stream: {:?}", err),
)
})?;
stream
.write_all(&Header::Ping.to_bytes())
.await
.map_err(|err| {
rspc::Error::new(
ErrorCode::InternalServerError,
format!("error sending ping header: {:?}", err),
)
})?;
Ok("connected")
})
})
.procedure("spacedrop", {

View file

@ -3,7 +3,7 @@ use crate::library::{Libraries, Library};
use super::{err_break, err_return, CompressedCRDTOperations};
use sd_cloud_api::RequestConfigProvider;
use sd_core_sync::NTP64;
use sd_p2p::spacetunnel::{IdentityOrRemoteIdentity, RemoteIdentity};
use sd_p2p2::{IdentityOrRemoteIdentity, RemoteIdentity};
use sd_prisma::prisma::{cloud_crdt_operation, instance, PrismaClient, SortOrder};
use sd_sync::CRDTOperation;
use sd_utils::uuid_to_bytes;

View file

@ -9,10 +9,8 @@ use crate::{
use sd_file_ext::text::is_text;
use sd_file_path_helper::{file_path_to_handle_custom_uri, IsolatedFilePathData};
use sd_p2p::{
spaceblock::Range,
spacetunnel::{IdentityOrRemoteIdentity, RemoteIdentity},
};
use sd_p2p2::{IdentityOrRemoteIdentity, RemoteIdentity};
use sd_p2p_block::Range;
use sd_prisma::prisma::{file_path, location};
use sd_utils::db::maybe_missing;
@ -243,45 +241,43 @@ pub fn router(node: Arc<Node>) -> Router<()> {
}
// TODO: Support `Range` requests and `ETag` headers
match state.node.p2p.get_library_service(&library.id) {
Some(service) => {
let stream = service
.connect(state.node.p2p.manager.clone(), &identity)
.await
.map_err(|err| {
not_found(format!(
"Error connecting to {identity}: {err:?}"
))
})?;
let stream = state
.node
.p2p
.get_instance(&library.id, identity)
.ok_or_else(|| {
not_found(format!("Error connecting to {identity}: no connection method available"))
})?
.new_stream()
.await
.map_err(|err| {
not_found(format!("Error connecting to {identity}: {err:?}"))
})?;
let (tx, mut rx) =
tokio::sync::mpsc::channel::<io::Result<Bytes>>(150);
// TODO: We only start a thread because of stupid `ManagerStreamAction2` and libp2p's `!Send/!Sync` bounds on a stream.
tokio::spawn(async move {
let Ok(()) = operations::request_file(
stream,
&library,
file_path_pub_id,
Range::Full,
MpscToAsyncWrite::new(PollSender::new(tx)),
)
.await
else {
return;
};
});
let (tx, mut rx) = tokio::sync::mpsc::channel::<io::Result<Bytes>>(150);
// TODO: We only start a thread because of stupid `ManagerStreamAction2` and libp2p's `!Send/!Sync` bounds on a stream.
tokio::spawn(async move {
let Ok(()) = operations::request_file(
stream,
&library,
file_path_pub_id,
Range::Full,
MpscToAsyncWrite::new(PollSender::new(tx)),
)
.await
else {
return;
};
});
// TODO: Content Type
Ok(InfallibleResponse::builder().status(StatusCode::OK).body(
body::boxed(StreamBody::new(stream! {
while let Some(item) = rx.recv().await {
yield item;
}
})),
))
}
None => Ok(not_found(())),
}
// TODO: Content Type
Ok(InfallibleResponse::builder().status(StatusCode::OK).body(
body::boxed(StreamBody::new(stream! {
while let Some(item) = rx.recv().await {
yield item;
}
})),
))
}
}
},

View file

@ -114,7 +114,9 @@ impl Node {
let (jobs, jobs_actor) = job::Jobs::new();
let libraries = library::Libraries::new(data_dir.join("libraries")).await?;
let (p2p, p2p_actor) = p2p::P2PManager::new(config.clone(), libraries.clone()).await?;
let (p2p, start_p2p) = p2p::P2PManager::new(config.clone(), libraries.clone())
.await
.map_err(NodeError::P2PManager)?;
let node =
Arc::new(Node {
data_dir: data_dir.to_path_buf(),
@ -160,7 +162,7 @@ impl Node {
locations_actor.start(node.clone());
node.libraries.init(&node).await?;
jobs_actor.start(node.clone());
p2p_actor.start(node.clone());
start_p2p(node.clone());
let router = api::mount();
@ -188,7 +190,7 @@ impl Node {
std::env::set_var(
"RUST_LOG",
format!("info,sd_core={level},sd_core::location::manager=info,sd_ai={level}"),
format!("info,sd_core={level},sd_p2p=debug,sd_core::location::manager=info,sd_ai={level}"),
);
}
@ -325,7 +327,7 @@ pub enum NodeError {
#[error("failed to initialize location manager: {0}")]
LocationManager(#[from] LocationManagerError),
#[error("failed to initialize p2p manager: {0}")]
P2PManager(#[from] sd_p2p::ManagerError),
P2PManager(String),
#[error("invalid platform integer: {0}")]
InvalidPlatformInt(u8),
#[cfg(debug_assertions)]

View file

@ -3,7 +3,7 @@ use crate::{
util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError},
};
use sd_p2p::spacetunnel::{Identity, IdentityOrRemoteIdentity};
use sd_p2p2::{Identity, IdentityOrRemoteIdentity};
use sd_prisma::prisma::{file_path, indexer_rule, instance, location, node, PrismaClient};
use sd_utils::{db::maybe_missing, error::FileIOError};
@ -163,12 +163,7 @@ impl LibraryConfig {
db.node()
.update_many(
vec![],
vec![
node::pub_id::set(node_config.id.as_bytes().to_vec()),
node::node_peer_id::set(Some(
node_config.keypair.peer_id().to_string(),
)),
],
vec![node::pub_id::set(node_config.id.as_bytes().to_vec())],
)
.exec()
.await?;

View file

@ -1,7 +1,7 @@
use crate::{api::CoreEvent, object::media::thumbnail::get_indexed_thumbnail_path, sync, Node};
use sd_file_path_helper::{file_path_to_full_path, IsolatedFilePathData};
use sd_p2p::spacetunnel::Identity;
use sd_p2p2::Identity;
use sd_prisma::prisma::{file_path, location, PrismaClient};
use sd_utils::{db::maybe_missing, error::FileIOError};
@ -67,6 +67,7 @@ impl Debug for Library {
}
impl Library {
#[allow(clippy::too_many_arguments)]
pub async fn new(
id: Uuid,
config: LibraryConfig,

View file

@ -3,7 +3,7 @@ use crate::{
location::{indexer, LocationManagerError},
};
use sd_p2p::spacetunnel::IdentityOrRemoteIdentityErr;
use sd_p2p2::IdentityOrRemoteIdentityErr;
use sd_utils::{
db::{self, MissingFieldError},
error::{FileIOError, NonUtf8PathError},

View file

@ -1,20 +1,19 @@
use crate::{
api::{utils::InvalidateOperationEvent, CoreEvent},
invalidate_query,
cloud, invalidate_query,
location::{
indexer,
metadata::{LocationMetadataError, SpacedriveLocationMetadataFile},
},
node::Platform,
object::tag,
p2p::{self},
sync,
p2p, sync,
util::{mpscrr, MaybeUndefined},
Node,
};
use sd_core_sync::SyncMessage;
use sd_p2p::spacetunnel::{Identity, IdentityOrRemoteIdentity};
use sd_p2p2::{Identity, IdentityOrRemoteIdentity};
use sd_prisma::prisma::{crdt_operation, instance, location, SortOrder};
use sd_utils::{
db,
@ -535,7 +534,7 @@ impl Libraries {
loop {
debug!("Syncing library with cloud!");
if let Some(_) = library.config().await.cloud_id {
if library.config().await.cloud_id.is_some() {
if let Ok(lib) =
sd_cloud_api::library::get(node.cloud_api_config().await, library.id)
.await
@ -575,7 +574,7 @@ impl Libraries {
}
}
if &lib.name != &*library.config().await.name {
if lib.name != *library.config().await.name {
warn!("Library name on cloud is outdated. Updating...");
if let Err(err) = sd_cloud_api::library::update(
@ -593,17 +592,16 @@ impl Libraries {
}
for instance in lib.instances {
if let Err(err) =
crate::cloud::sync::receive::create_instance(
&library,
&node.libraries,
instance.uuid,
instance.identity,
instance.node_id,
instance.node_name,
instance.node_platform,
)
.await
if let Err(err) = cloud::sync::receive::create_instance(
&library,
&node.libraries,
instance.uuid,
instance.identity,
instance.node_id,
instance.node_name,
instance.node_platform,
)
.await
{
error!(
"Failed to create instance from cloud: {:#?}",

View file

@ -4,7 +4,7 @@ use crate::{
util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError},
};
use sd_p2p::{Keypair, ManagerConfig};
use sd_p2p2::Identity;
use sd_utils::error::FileIOError;
use std::{
@ -28,6 +28,29 @@ use uuid::Uuid;
/// NODE_STATE_CONFIG_NAME is the name of the file which stores the NodeState
pub const NODE_STATE_CONFIG_NAME: &str = "node_state.sdconfig";
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Type)]
pub enum P2PDiscoveryState {
#[default]
Everyone,
ContactsOnly,
Disabled,
}
#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize, Type)]
#[serde(rename_all = "snake_case", untagged)]
pub enum Port {
Disabled,
#[default]
Random,
Discrete(u16),
}
impl Port {
pub fn is_random(&self) -> bool {
matches!(self, Port::Random)
}
}
/// NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
#[derive(Debug, Clone, Serialize, Deserialize)] // If you are adding `specta::Type` on this your probably about to leak the P2P private key
pub struct NodeConfig {
@ -40,10 +63,15 @@ pub struct NodeConfig {
pub notifications: Vec<Notification>,
/// The p2p identity keypair for this node. This is used to identify the node on the network.
/// This keypair does effectively nothing except for provide libp2p with a stable peer_id.
pub keypair: Keypair,
#[serde(with = "identity_serde")]
pub identity: Identity,
/// P2P config
#[serde(default, skip_serializing_if = "Port::is_random")]
pub p2p_ipv4_port: Port,
#[serde(default, skip_serializing_if = "Port::is_random")]
pub p2p_ipv6_port: Port,
#[serde(default)]
pub p2p: ManagerConfig,
pub p2p_discovery: P2PDiscoveryState,
/// Feature flags enabled on the node
#[serde(default)]
pub features: Vec<BackendFeature>,
@ -60,6 +88,30 @@ pub struct NodeConfig {
version: NodeConfigVersion,
}
mod identity_serde {
use sd_p2p2::Identity;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
pub fn serialize<S>(identity: &Identity, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
to_string(identity).serialize(serializer)
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Identity, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
Identity::from_bytes(&base91::slice_decode(s.as_bytes())).map_err(serde::de::Error::custom)
}
pub fn to_string(identity: &Identity) -> String {
String::from_utf8_lossy(&base91::slice_encode(&identity.to_bytes())).to_string()
}
}
#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq, Type)]
pub struct NodePreferences {
pub thumbnailer: ThumbnailerPreferences,
@ -73,10 +125,11 @@ pub enum NodeConfigVersion {
V0 = 0,
V1 = 1,
V2 = 2,
V3 = 3,
}
impl ManagedVersion<NodeConfigVersion> for NodeConfig {
const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V2;
const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V3;
const KIND: Kind = Kind::Json("version");
type MigrationError = NodeConfigError;
@ -99,9 +152,11 @@ impl ManagedVersion<NodeConfigVersion> for NodeConfig {
Some(Self {
id: Uuid::new_v4(),
name,
keypair: Keypair::generate(),
identity: Identity::default(),
p2p_ipv4_port: Port::Random,
p2p_ipv6_port: Port::Random,
p2p_discovery: P2PDiscoveryState::Everyone,
version: Self::LATEST_VERSION,
p2p: ManagerConfig::default(),
features: vec![],
notifications: vec![],
auth_token: None,
@ -173,6 +228,33 @@ impl NodeConfig {
.map_err(|e| FileIOError::from((path, e)))?;
}
(NodeConfigVersion::V2, NodeConfigVersion::V3) => {
let mut config: Map<String, Value> =
serde_json::from_slice(&fs::read(path).await.map_err(|e| {
FileIOError::from((
path,
e,
"Failed to read node config file for migration",
))
})?)
.map_err(VersionManagerError::SerdeJson)?;
config.remove("keypair");
config.remove("p2p");
config.insert(
String::from("identity"),
json!(identity_serde::to_string(&Default::default())),
);
let a =
serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?;
fs::write(path, a)
.await
.map_err(|e| FileIOError::from((path, e)))?;
}
_ => {
error!("Node config version is not handled: {:?}", current);
return Err(VersionManagerError::UnexpectedMigration {

View file

@ -0,0 +1,26 @@
// TODO: This is unused but will be used in the future.
// use std::sync::Arc;
// use sd_p2p2::{flume::bounded, HookEvent, P2P};
// /// A P2P hook which listens for the availability of peers and connects with them.
// pub struct ConnectHook {}
// impl ConnectHook {
// pub fn spawn(p2p: Arc<P2P>) -> Self {
// let (tx, rx) = bounded(15);
// let _ = p2p.register_hook("sd-connect-hook", tx);
// tokio::spawn(async move {
// while let Ok(event) = rx.recv_async().await {
// match event {
// // TODO: Do the thing. For now we don't need this.
// HookEvent::Shutdown { _guard } => break,
// _ => continue,
// }
// }
// });
// Self {}
// }
// }

116
core/src/p2p/events.rs Normal file
View file

@ -0,0 +1,116 @@
use std::sync::Arc;
use sd_p2p2::{flume::bounded, HookEvent, RemoteIdentity, P2P};
use serde::Serialize;
use specta::Type;
use tokio::sync::broadcast;
use uuid::Uuid;
use super::PeerMetadata;
/// TODO: P2P event for the frontend
#[derive(Debug, Clone, Serialize, Type)]
#[serde(tag = "type")]
pub enum P2PEvent {
DiscoveredPeer {
identity: RemoteIdentity,
metadata: PeerMetadata,
},
ExpiredPeer {
identity: RemoteIdentity,
},
ConnectedPeer {
identity: RemoteIdentity,
},
DisconnectedPeer {
identity: RemoteIdentity,
},
SpacedropRequest {
id: Uuid,
identity: RemoteIdentity,
peer_name: String,
files: Vec<String>,
},
SpacedropProgress {
id: Uuid,
percent: u8,
},
SpacedropTimedout {
id: Uuid,
},
SpacedropRejected {
id: Uuid,
},
}
/// A P2P hook which listens for events and sends them over a channel which can be connected to the frontend.
pub struct P2PEvents {
events: (broadcast::Sender<P2PEvent>, broadcast::Receiver<P2PEvent>),
}
impl P2PEvents {
pub fn spawn(p2p: Arc<P2P>) -> Self {
let events = broadcast::channel(15);
let (tx, rx) = bounded(15);
let _ = p2p.register_hook("sd-frontend-events", tx);
let events_tx = events.0.clone();
tokio::spawn(async move {
while let Ok(event) = rx.recv_async().await {
let event = match event {
// We use `HookEvent::PeerUnavailable`/`HookEvent::PeerAvailable` over `HookEvent::PeerExpiredBy`/`HookEvent::PeerDiscoveredBy` so that having an active connection is treated as "discovered".
// It's possible to have an active connection without mDNS data (which is what Peer*By` are for)
HookEvent::PeerAvailable(peer) => {
let metadata = match PeerMetadata::from_hashmap(&peer.metadata()) {
Ok(metadata) => metadata,
Err(e) => {
println!(
"Invalid metadata for peer '{}': {:?}",
peer.identity(),
e
);
continue;
}
};
P2PEvent::DiscoveredPeer {
identity: peer.identity(),
metadata,
}
}
HookEvent::PeerUnavailable(identity) => P2PEvent::ExpiredPeer { identity },
HookEvent::PeerConnectedWith(_, peer) => P2PEvent::ConnectedPeer {
identity: peer.identity(),
},
HookEvent::PeerDisconnectedWith(_, identity) => {
let peers = p2p.peers();
let Some(peer) = peers.get(&identity) else {
continue;
};
if !peer.is_connected() {
P2PEvent::DisconnectedPeer { identity }
} else {
continue;
}
}
HookEvent::Shutdown { _guard } => break,
_ => continue,
};
let _ = events_tx.send(event);
}
});
Self { events }
}
pub fn subscribe(&self) -> broadcast::Receiver<P2PEvent> {
self.events.0.subscribe()
}
#[allow(clippy::result_large_err)]
pub fn send(&self, event: P2PEvent) -> Result<usize, broadcast::error::SendError<P2PEvent>> {
self.events.0.send(event)
}
}

View file

@ -1,70 +1,31 @@
#![allow(unused)] // TODO: Remove this
use std::sync::Arc;
use crate::library::{Libraries, Library, LibraryManagerEvent};
use sd_p2p2::P2P;
use tracing::error;
use sd_p2p::{spacetunnel::IdentityOrRemoteIdentity, Service};
use crate::library::{Libraries, LibraryManagerEvent};
use std::{
collections::HashMap,
fmt,
sync::{Arc, PoisonError, RwLock},
};
use tokio::sync::mpsc;
use tracing::{error, warn};
use uuid::Uuid;
use super::{LibraryMetadata, P2PManager};
pub struct LibraryServices {
services: RwLock<HashMap<Uuid, Arc<Service<LibraryMetadata>>>>,
register_service_tx: mpsc::Sender<Arc<Service<LibraryMetadata>>>,
}
impl fmt::Debug for LibraryServices {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("LibraryServices")
.field(
"services",
&self
.services
.read()
.unwrap_or_else(PoisonError::into_inner)
.keys(),
)
.finish()
}
}
impl LibraryServices {
pub fn new(register_service_tx: mpsc::Sender<Arc<Service<LibraryMetadata>>>) -> Self {
Self {
services: Default::default(),
register_service_tx,
}
}
pub(crate) async fn start(manager: Arc<P2PManager>, libraries: Arc<Libraries>) {
pub fn start(p2p: Arc<P2P>, libraries: Arc<Libraries>) {
tokio::spawn(async move {
if let Err(err) = libraries
.rx
.clone()
.subscribe(|msg| {
let manager = manager.clone();
let p2p = p2p.clone();
async move {
match msg {
LibraryManagerEvent::InstancesModified(library)
| LibraryManagerEvent::Load(library) => {
manager
.clone()
.libraries
.load_library(manager, &library)
.await
p2p.metadata_mut().insert(
library.id.to_string(),
library.identity.to_remote_identity().to_string(),
);
}
LibraryManagerEvent::Edit(library) => {
manager.libraries.edit_library(&library).await
LibraryManagerEvent::Edit(_library) => {
// TODO: Send changes to all connected nodes or queue sending for when they are online!
}
LibraryManagerEvent::Delete(library) => {
manager.libraries.delete_library(&library).await
p2p.metadata_mut().remove(&library.id.to_string());
}
}
}
@ -73,87 +34,5 @@ impl LibraryServices {
{
error!("Core may become unstable! `LibraryServices::start` manager aborted with error: {err:?}");
}
}
pub fn get(&self, id: &Uuid) -> Option<Arc<Service<LibraryMetadata>>> {
self.services
.read()
.unwrap_or_else(PoisonError::into_inner)
.get(id)
.cloned()
}
pub fn libraries(&self) -> Vec<(Uuid, Arc<Service<LibraryMetadata>>)> {
self.services
.read()
.unwrap_or_else(PoisonError::into_inner)
.iter()
.map(|(k, v)| (*k, v.clone()))
.collect::<Vec<_>>()
}
pub(crate) async fn load_library(&self, manager: Arc<P2PManager>, library: &Library) {
let identities = match library.db.instance().find_many(vec![]).exec().await {
Ok(library) => library
.into_iter()
.filter_map(
// TODO: Error handling
|i| match IdentityOrRemoteIdentity::from_bytes(&i.identity) {
Err(err) => {
warn!("error parsing identity: {err:?}");
None
}
Ok(IdentityOrRemoteIdentity::Identity(_)) => None,
Ok(IdentityOrRemoteIdentity::RemoteIdentity(identity)) => Some(identity),
},
)
.collect(),
Err(err) => {
warn!("error loading library '{}': {err:?}", library.id);
return;
}
};
let mut inserted = false;
let service = {
let mut service = self
.services
.write()
.unwrap_or_else(PoisonError::into_inner);
let service = service.entry(library.id).or_insert_with(|| {
inserted = true;
Arc::new(
Service::new(
String::from_utf8_lossy(&base91::slice_encode(library.id.as_bytes())),
manager.manager.clone(),
)
.expect("error creating service with duplicate service name"),
)
});
service.add_known(identities);
service.clone()
};
if inserted {
service.update(LibraryMetadata {});
if self.register_service_tx.send(service).await.is_err() {
warn!("error sending on 'register_service_tx'. This indicates a bug!");
}
}
}
pub(crate) async fn edit_library(&self, _library: &Library) {
// TODO: Send changes to all connected nodes!
// TODO: Update mdns
}
pub(crate) async fn delete_library(&self, library: &Library) {
drop(
self.services
.write()
.unwrap_or_else(PoisonError::into_inner)
.remove(&library.id),
);
}
});
}

View file

@ -1,22 +0,0 @@
use sd_p2p::Metadata;
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use specta::Type;
#[derive(Debug, Clone, Type, Serialize, Deserialize)]
pub struct LibraryMetadata {}
impl Metadata for LibraryMetadata {
fn to_hashmap(self) -> HashMap<String, String> {
HashMap::with_capacity(0)
}
fn from_hashmap(_: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized,
{
Ok(Self {})
}
}

311
core/src/p2p/manager.rs Normal file
View file

@ -0,0 +1,311 @@
use crate::{
node::{
config::{self, P2PDiscoveryState, Port},
get_hardware_model_name, HardwareModel,
},
p2p::{libraries, operations, sync::SyncMessage, Header, OperatingSystem, SPACEDRIVE_APP_ID},
Node,
};
use sd_p2p2::{
flume::{bounded, Receiver},
Libp2pPeerId, Listener, Mdns, Peer, QuicTransport, RemoteIdentity, UnicastStream, P2P,
};
use sd_p2p_tunnel::Tunnel;
use serde::Serialize;
use serde_json::json;
use specta::Type;
use std::{
collections::{HashMap, HashSet},
net::SocketAddr,
sync::{atomic::AtomicBool, Arc, Mutex, PoisonError},
};
use tokio::sync::oneshot;
use tracing::{error, info};
use uuid::Uuid;
use super::{P2PEvents, PeerMetadata};
/// Owns the node's P2P runtime: the `sd_p2p2::P2P` core, the QUIC transport,
/// optional mDNS discovery, the frontend event bus and in-flight Spacedrop state.
pub struct P2PManager {
	pub(crate) p2p: Arc<P2P>,
	// mDNS discovery hook; `None` while local network discovery is disabled.
	mdns: Mutex<Option<Mdns>>,
	quic: QuicTransport,
	// The `libp2p::PeerId`. This is for debugging only, use `RemoteIdentity` instead.
	lp2p_peer_id: Libp2pPeerId,
	pub(crate) events: P2PEvents,
	// connect_hook: ConnectHook,
	// Pending incoming Spacedrop requests, keyed by request id; the sender resolves
	// with `Some(path)` on accept or `None` on reject.
	pub(super) spacedrop_pairing_reqs: Arc<Mutex<HashMap<Uuid, oneshot::Sender<Option<String>>>>>,
	// Cancellation flags for in-progress Spacedrop transfers, keyed by request id.
	pub(super) spacedrop_cancelations: Arc<Mutex<HashMap<Uuid, Arc<AtomicBool>>>>,
	pub(crate) node_config: Arc<config::Manager>,
}
impl P2PManager {
pub async fn new(
node_config: Arc<config::Manager>,
libraries: Arc<crate::library::Libraries>,
) -> Result<(Arc<P2PManager>, impl FnOnce(Arc<Node>)), String> {
let (tx, rx) = bounded(25);
let p2p = P2P::new(SPACEDRIVE_APP_ID, node_config.get().await.identity, tx);
let (quic, lp2p_peer_id) = QuicTransport::spawn(p2p.clone())?;
let this = Arc::new(Self {
p2p: p2p.clone(),
lp2p_peer_id,
mdns: Mutex::new(None),
quic,
events: P2PEvents::spawn(p2p.clone()),
// connect_hook: ConnectHook::spawn(p2p),
spacedrop_pairing_reqs: Default::default(),
spacedrop_cancelations: Default::default(),
node_config,
});
this.on_node_config_change().await;
libraries::start(this.p2p.clone(), libraries);
info!(
"Node RemoteIdentity('{}') libp2p::PeerId('{:?}') is now online listening at addresses: {:?}",
this.p2p.remote_identity(),
this.lp2p_peer_id,
this.p2p.listeners()
);
Ok((this.clone(), |node| {
tokio::spawn(start(this, node, rx));
}))
}
// TODO: Remove this and add a subscription system to `config::Manager`
pub async fn on_node_config_change(&self) {
let config = self.node_config.get().await;
PeerMetadata {
name: config.name.clone(),
operating_system: Some(OperatingSystem::get_os()),
device_model: Some(get_hardware_model_name().unwrap_or(HardwareModel::Other)),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
}
.update(&mut self.p2p.metadata_mut());
let port = match config.p2p_ipv4_port {
Port::Disabled => None,
Port::Random => Some(0),
Port::Discrete(port) => Some(port),
};
info!("Setting quic ipv4 listener to: {port:?}");
if let Err(err) = self.quic.set_ipv4_enabled(port).await {
error!("Failed to enabled quic ipv4 listener: {err}");
self.node_config
.write(|c| c.p2p_ipv4_port = Port::Disabled)
.await
.ok();
}
let port = match config.p2p_ipv6_port {
Port::Disabled => None,
Port::Random => Some(0),
Port::Discrete(port) => Some(port),
};
info!("Setting quic ipv4 listener to: {port:?}");
if let Err(err) = self.quic.set_ipv6_enabled(port).await {
error!("Failed to enabled quic ipv6 listener: {err}");
self.node_config
.write(|c| c.p2p_ipv6_port = Port::Disabled)
.await
.ok();
}
let should_revert = match config.p2p_discovery {
P2PDiscoveryState::Everyone
// TODO: Make `ContactsOnly` work
| P2PDiscoveryState::ContactsOnly => {
let mut mdns = self.mdns.lock().unwrap_or_else(PoisonError::into_inner);
if mdns.is_none() {
match Mdns::spawn(self.p2p.clone()) {
Ok(m) => {
info!("mDNS started successfully.");
*mdns = Some(m);
false
}
Err(err) => {
error!("Failed to start mDNS: {err}");
true
}
}
} else {
false
}
}
P2PDiscoveryState::Disabled => {
let mdns = {
let mut mdns = self.mdns.lock().unwrap_or_else(PoisonError::into_inner);
mdns.take()
};
if let Some(mdns) = mdns {
mdns.shutdown().await;
info!("mDNS shutdown successfully.");
}
false
},
};
// The `should_revert` bit is weird but we need this future to stay `Send` as rspc requires.
// To make it send we have to drop `quic` (a `!Send` `MutexGuard`).
// Doing it within the above scope seems to not work (even when manually calling `drop`).
if should_revert {
let _ = self
.node_config
.write(|c| c.p2p_discovery = P2PDiscoveryState::Disabled)
.await;
}
}
pub fn get_library_instances(&self, library: &Uuid) -> Vec<(RemoteIdentity, Arc<Peer>)> {
let library_id = library.to_string();
self.p2p
.peers()
.iter()
.filter(|(_, p)| p.metadata().contains_key(&library_id))
.map(|(i, p)| (*i, p.clone()))
.collect()
}
pub fn get_instance(&self, library: &Uuid, identity: RemoteIdentity) -> Option<Arc<Peer>> {
let library_id = library.to_string();
self.p2p
.peers()
.iter()
.find(|(i, p)| **i == identity && p.metadata().contains_key(&library_id))
.map(|(_, p)| p.clone())
}
pub async fn state(&self) -> serde_json::Value {
let listeners = self.p2p.listeners();
let node_config = self.node_config.get().await;
json!({
"self_identity": self.p2p.remote_identity().to_string(),
"self_peer_id": format!("{:?}", self.lp2p_peer_id),
"metadata": self.p2p.metadata().clone(),
"peers": self.p2p.peers().iter().map(|(identity, p)| json!({
"identity": identity.to_string(),
"metadata": p.metadata().clone(),
"can_connect": p.can_connect(),
"is_connected": p.is_connected(),
"active_connections": p.active_connections(),
"connection_methods": p.connection_methods().iter().map(|id| format!("{:?}", id)).collect::<Vec<_>>(),
"discovered_by": p.discovered_by().iter().map(|id| format!("{:?}", id)).collect::<Vec<_>>(),
})).collect::<Vec<_>>(),
"hooks": self.p2p.hooks().iter().map(|(id, name)| json!({
"id": format!("{:?}", id),
"name": name,
"listener_addrs": listeners.iter().find(|l| l.is_hook_id(*id)).map(|l| l.addrs.clone()),
})).collect::<Vec<_>>(),
"config": json!({
"p2p_ipv4_port": node_config.p2p_ipv4_port,
"p2p_ipv6_port": node_config.p2p_ipv6_port,
"p2p_discovery": node_config.p2p_discovery,
})
})
}
pub async fn shutdown(&self) {
// `self.p2p` will automatically take care of shutting down all the hooks. Eg. `self.quic`, `self.mdns`, etc.
self.p2p.shutdown().await;
}
}
/// Inbound stream dispatch loop: for every `UnicastStream` handed to us by the
/// P2P core, spawn a task that decodes the protocol `Header` and routes to the
/// matching operation handler (ping, Spacedrop, sync, file request).
/// Exits when the channel closes, i.e. on P2P shutdown.
async fn start(
	this: Arc<P2PManager>,
	node: Arc<Node>,
	rx: Receiver<UnicastStream>,
) -> Result<(), ()> {
	while let Ok(mut stream) = rx.recv_async().await {
		let this = this.clone();
		let node = node.clone();

		tokio::spawn(async move {
			// Removed leftover debug `println!` that dumped every incoming stream to stdout.
			let Ok(header) = Header::from_stream(&mut stream).await.map_err(|err| {
				error!("Failed to read header from stream: {}", err);
			}) else {
				return;
			};

			match header {
				Header::Ping => operations::ping::reciever(stream).await,
				Header::Spacedrop(req) => {
					let Err(()) = operations::spacedrop::reciever(&this, req, stream).await else {
						return;
					};

					error!("Failed to handle Spacedrop request");
				}
				Header::Sync(library_id) => {
					// Sync messages travel over an encrypted tunnel.
					let Ok(mut tunnel) = Tunnel::responder(stream).await.map_err(|err| {
						error!("Failed `Tunnel::responder`: {}", err);
					}) else {
						return;
					};

					let Ok(msg) = SyncMessage::from_stream(&mut tunnel).await.map_err(|err| {
						error!("Failed `SyncMessage::from_stream`: {}", err);
					}) else {
						return;
					};

					let Ok(library) =
						node.libraries
							.get_library(&library_id)
							.await
							.ok_or_else(|| {
								error!("Failed to get library '{library_id}'");

								// TODO: Respond to remote client with warning!
							})
					else {
						return;
					};

					match msg {
						SyncMessage::NewOperations => {
							let Err(()) = super::sync::responder(&mut tunnel, library).await else {
								return;
							};

							error!("Failed to handle sync responder request");
						}
					};
				}
				Header::File(req) => {
					let Err(()) = operations::request_file::receiver(&node, req, stream).await
					else {
						return;
					};

					error!("Failed to handle file request");
				}
			};
		});
	}

	Ok::<_, ()>(())
}
/// Serializable view of a transport listener, exposed to the frontend via specta.
#[derive(Debug, Serialize, Type)]
pub struct Listener2 {
	// Debug-formatted listener id (stringified for serialization).
	pub id: String,
	pub name: &'static str,
	// Socket addresses this listener is bound to.
	pub addrs: HashSet<SocketAddr>,
}
/// Convert the raw transport `Listener`s into their serializable `Listener2` form.
pub fn into_listener2(l: &[Listener]) -> Vec<Listener2> {
	let mut out = Vec::with_capacity(l.len());
	for listener in l {
		out.push(Listener2 {
			id: format!("{:?}", listener.id),
			name: listener.name,
			addrs: listener.addrs.clone(),
		});
	}
	out
}

View file

@ -1,7 +1,5 @@
use crate::node::{HardwareModel, Platform};
use sd_p2p::Metadata;
use std::{collections::HashMap, env, str::FromStr};
use serde::{Deserialize, Serialize};
@ -15,10 +13,9 @@ pub struct PeerMetadata {
pub version: Option<String>,
}
impl Metadata for PeerMetadata {
fn to_hashmap(self) -> HashMap<String, String> {
let mut map = HashMap::with_capacity(5);
map.insert("name".to_owned(), self.name);
impl PeerMetadata {
pub fn update(self, map: &mut HashMap<String, String>) {
map.insert("name".to_owned(), self.name.clone());
if let Some(os) = self.operating_system {
map.insert("os".to_owned(), os.to_string());
}
@ -28,13 +25,9 @@ impl Metadata for PeerMetadata {
if let Some(device_model) = self.device_model {
map.insert("device_model".to_owned(), device_model.to_string());
}
map
}
fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
where
Self: Sized,
{
pub fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String> {
Ok(Self {
name: data
.get("name")

View file

@ -1,22 +1,19 @@
#![warn(clippy::all, clippy::unwrap_used, clippy::panic)]
#![allow(clippy::unnecessary_cast)] // Yeah they aren't necessary on this arch, but they are on others
mod libraries;
mod library_metadata;
mod connect_hook;
mod events;
pub(super) mod libraries;
mod manager;
mod metadata;
pub mod operations;
mod p2p_events;
mod p2p_manager;
mod p2p_manager_actor;
mod peer_metadata;
mod protocol;
pub mod sync;
pub use libraries::*;
pub use library_metadata::*;
pub use p2p_events::*;
pub use p2p_manager::*;
pub use p2p_manager_actor::*;
pub use peer_metadata::*;
// pub use connect_hook::*;
pub use events::*;
pub use manager::*;
pub use metadata::*;
pub use protocol::*;
pub(super) const SPACEDRIVE_APP_ID: &str = "sd";

View file

@ -1,17 +1,12 @@
use crate::p2p::P2PManager;
use sd_p2p::PeerMessageEvent;
use std::sync::Arc;
use sd_p2p2::UnicastStream;
use tracing::debug;
/// Send a ping to all peers we are connected to
#[allow(unused)]
pub async fn ping(_p2p: Arc<P2PManager>) {
pub async fn ping() {
todo!();
}
pub(crate) async fn reciever(event: PeerMessageEvent) {
debug!("Received ping from peer '{}'", event.identity);
pub(crate) async fn reciever(stream: UnicastStream) {
debug!("Received ping from peer '{}'", stream.remote_identity());
}

View file

@ -5,12 +5,15 @@ use crate::{
};
use sd_file_path_helper::{file_path_to_handle_p2p_serve_file, IsolatedFilePathData};
use sd_p2p::{
spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer},
spacetime::UnicastStream,
PeerMessageEvent,
};
use sd_p2p2::UnicastStream;
use sd_p2p_block::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer};
use sd_prisma::prisma::file_path;
use tokio::{
fs::File,
io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
};
use tracing::{debug, warn};
use uuid::Uuid;
use std::{
path::Path,
@ -20,13 +23,6 @@ use std::{
},
};
use tokio::{
fs::File,
io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
};
use tracing::{debug, warn};
use uuid::Uuid;
/// Request a file from the remote machine over P2P. This is used for preview media and quick preview.
///
/// DO NOT USE THIS WITHOUT `node.files_over_p2p_flag == true`
@ -111,9 +107,8 @@ pub(crate) async fn receiver(
file_path_id,
range,
}: HeaderFile,
event: PeerMessageEvent,
mut stream: UnicastStream,
) -> Result<(), ()> {
let mut stream = event.stream;
#[allow(clippy::panic)] // If you've made it this far that's on you.
if !node.files_over_p2p_flag.load(Ordering::Relaxed) {
panic!("Files over P2P is disabled!");

View file

@ -1,22 +1,17 @@
use crate::p2p::{Header, P2PEvent, P2PManager};
use sd_p2p::{
spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer},
spacetunnel::RemoteIdentity,
PeerMessageEvent,
};
use std::{
borrow::Cow,
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
Arc, PoisonError,
},
time::Duration,
};
use crate::p2p::{Header, P2PEvent, P2PManager};
use futures::future::join_all;
use sd_p2p2::{RemoteIdentity, UnicastStream};
use sd_p2p_block::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer};
use tokio::{
fs::{create_dir_all, File},
io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter},
@ -32,7 +27,6 @@ pub(crate) const SPACEDROP_TIMEOUT: Duration = Duration::from_secs(60);
// TODO: Proper error handling
pub async fn spacedrop(
p2p: Arc<P2PManager>,
// TODO: Stop using `PeerId`
identity: RemoteIdentity,
paths: Vec<PathBuf>,
) -> Result<Uuid, ()> {
@ -61,7 +55,10 @@ pub async fn spacedrop(
.await
.into_iter()
.collect::<Result<Vec<_>, std::io::Error>>()
.map_err(|_| ())? // TODO: Error handling
.map_err(|err| {
warn!("error opening file: '{err:?}'");
// TODO: Proper error type
})?
.into_iter()
.unzip();
@ -69,8 +66,18 @@ pub async fn spacedrop(
let id = Uuid::new_v4();
debug!("({id}): starting Spacedrop with peer '{identity}");
let mut stream = p2p.manager.stream(identity).await.map_err(|err| {
debug!("({id}): failed to connect: {err:?}");
let peer = p2p
.p2p
.peers()
.get(&identity)
.ok_or_else(|| {
debug!("({id}): failed to find connection method with '{identity}'");
// TODO: Proper error
})?
.clone();
let mut stream = peer.new_stream().await.map_err(|err| {
debug!("({id}): failed to connect to '{identity}': {err:?}");
// TODO: Proper error
})?;
@ -95,7 +102,7 @@ pub async fn spacedrop(
// Add 5 seconds incase the user responded on the deadline and slow network
_ = sleep(SPACEDROP_TIMEOUT + Duration::from_secs(5)) => {
debug!("({id}): timed out, cancelling");
p2p.events.0.send(P2PEvent::SpacedropTimedout { id }).ok();
p2p.events.send(P2PEvent::SpacedropTimedout { id }).ok();
return;
},
};
@ -103,18 +110,18 @@ pub async fn spacedrop(
match result {
Ok(0) => {
debug!("({id}): Spacedrop was rejected from peer '{identity}'");
p2p.events.0.send(P2PEvent::SpacedropRejected { id }).ok();
p2p.events.send(P2PEvent::SpacedropRejected { id }).ok();
return;
}
Ok(1) => {} // Okay
Ok(_) => todo!(), // TODO: Proper error
Err(_) => todo!(), // TODO: Proper error
Ok(1) => {} // Okay
Ok(_) => todo!(), // TODO: Proper error
Err(err) => todo!("{:?}", err), // TODO: Proper error
}
let cancelled = Arc::new(AtomicBool::new(false));
p2p.spacedrop_cancelations
.lock()
.await
.unwrap_or_else(PoisonError::into_inner)
.insert(id, cancelled.clone());
debug!("({id}): starting transfer");
@ -124,7 +131,6 @@ pub async fn spacedrop(
&requests,
|percent| {
p2p.events
.0
.send(P2PEvent::SpacedropProgress { id, percent })
.ok();
},
@ -138,7 +144,6 @@ pub async fn spacedrop(
debug!("({id}): failed to send file '{file_id}': {err}");
// TODO: Error to frontend
// p2p.events
// .0
// .send(P2PEvent::SpacedropFailed { id, file_id })
// .ok();
return;
@ -154,7 +159,12 @@ pub async fn spacedrop(
// TODO: Move these off the manager
impl P2PManager {
pub async fn accept_spacedrop(&self, id: Uuid, path: String) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
if let Some(chan) = self
.spacedrop_pairing_reqs
.lock()
.unwrap_or_else(PoisonError::into_inner)
.remove(&id)
{
chan.send(Some(path))
.map_err(|err| {
warn!("error accepting Spacedrop '{id:?}': '{err:?}'");
@ -164,7 +174,12 @@ impl P2PManager {
}
pub async fn reject_spacedrop(&self, id: Uuid) {
if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) {
if let Some(chan) = self
.spacedrop_pairing_reqs
.lock()
.unwrap_or_else(PoisonError::into_inner)
.remove(&id)
{
chan.send(None)
.map_err(|err| {
warn!("error rejecting Spacedrop '{id:?}': '{err:?}'");
@ -174,7 +189,12 @@ impl P2PManager {
}
pub async fn cancel_spacedrop(&self, id: Uuid) {
if let Some(cancelled) = self.spacedrop_cancelations.lock().await.remove(&id) {
if let Some(cancelled) = self
.spacedrop_cancelations
.lock()
.unwrap_or_else(PoisonError::into_inner)
.remove(&id)
{
cancelled.store(true, Ordering::Relaxed);
}
}
@ -183,26 +203,27 @@ impl P2PManager {
pub(crate) async fn reciever(
this: &Arc<P2PManager>,
req: SpaceblockRequests,
event: PeerMessageEvent,
mut stream: UnicastStream,
) -> Result<(), ()> {
let id = req.id;
let mut stream = event.stream;
let (tx, rx) = oneshot::channel();
info!(
"({id}): received '{}' files from peer '{}' with block size '{:?}'",
req.requests.len(),
event.identity,
stream.remote_identity(),
req.block_size
);
this.spacedrop_pairing_reqs.lock().await.insert(id, tx);
this.spacedrop_pairing_reqs
.lock()
.unwrap_or_else(PoisonError::into_inner)
.insert(id, tx);
if this
.events
.0
.send(P2PEvent::SpacedropRequest {
id,
identity: event.identity,
identity: stream.remote_identity(),
peer_name: "Unknown".into(),
// TODO: A better solution to this
// manager
@ -245,7 +266,7 @@ pub(crate) async fn reciever(
let cancelled = Arc::new(AtomicBool::new(false));
this.spacedrop_cancelations
.lock()
.await
.unwrap_or_else(PoisonError::into_inner)
.insert(id, cancelled.clone());
stream.write_all(&[1]).await.map_err(|err| {
@ -258,7 +279,7 @@ pub(crate) async fn reciever(
let names = req.requests.iter().map(|req| req.name.clone()).collect::<Vec<_>>();
let mut transfer = Transfer::new(&req, |percent| {
this.events.0.send(P2PEvent::SpacedropProgress { id, percent }).ok();
this.events.send(P2PEvent::SpacedropProgress { id, percent }).ok();
}, &cancelled);
let file_path = PathBuf::from(file_path);

View file

@ -1,42 +0,0 @@
use sd_p2p::spacetunnel::RemoteIdentity;
use serde::Serialize;
use specta::Type;
use uuid::Uuid;
use super::PeerMetadata;
/// TODO: P2P event for the frontend
#[derive(Debug, Clone, Serialize, Type)]
#[serde(tag = "type")]
pub enum P2PEvent {
DiscoveredPeer {
identity: RemoteIdentity,
metadata: PeerMetadata,
},
ExpiredPeer {
identity: RemoteIdentity,
},
ConnectedPeer {
identity: RemoteIdentity,
},
DisconnectedPeer {
identity: RemoteIdentity,
},
SpacedropRequest {
id: Uuid,
identity: RemoteIdentity,
peer_name: String,
files: Vec<String>,
},
SpacedropProgress {
id: Uuid,
percent: u8,
},
SpacedropTimedout {
id: Uuid,
},
SpacedropRejected {
id: Uuid,
},
}

View file

@ -1,169 +0,0 @@
use crate::{
node::{config, get_hardware_model_name, HardwareModel},
p2p::{OperatingSystem, SPACEDRIVE_APP_ID},
};
use sd_p2p::{
spacetunnel::RemoteIdentity, Manager, ManagerConfig, ManagerError, PeerStatus, Service,
};
use std::{
collections::{HashMap, HashSet},
net::SocketAddr,
sync::{atomic::AtomicBool, Arc},
};
use serde::Serialize;
use specta::Type;
use tokio::sync::{broadcast, mpsc, oneshot, Mutex};
use tracing::info;
use uuid::Uuid;
use super::{LibraryMetadata, LibraryServices, P2PEvent, P2PManagerActor, PeerMetadata};
pub struct P2PManager {
pub(crate) node: Service<PeerMetadata>,
pub(crate) libraries: LibraryServices,
pub events: (broadcast::Sender<P2PEvent>, broadcast::Receiver<P2PEvent>),
pub manager: Arc<Manager>,
pub(super) spacedrop_pairing_reqs: Arc<Mutex<HashMap<Uuid, oneshot::Sender<Option<String>>>>>,
pub(super) spacedrop_cancelations: Arc<Mutex<HashMap<Uuid, Arc<AtomicBool>>>>,
node_config_manager: Arc<config::Manager>,
}
impl P2PManager {
pub async fn new(
node_config: Arc<config::Manager>,
libraries: Arc<crate::library::Libraries>,
) -> Result<(Arc<P2PManager>, P2PManagerActor), ManagerError> {
let (keypair, manager_config) = {
let config = node_config.get().await;
(config.keypair, config.p2p.clone())
};
let (manager, stream) =
sd_p2p::Manager::new(SPACEDRIVE_APP_ID, &keypair, manager_config).await?;
info!(
"Node RemoteIdentity('{}') libp2p::PeerId('{}') is now online listening at addresses: {:?}",
manager.identity(),
manager.libp2p_peer_id(),
stream.listen_addrs()
);
let (register_service_tx, register_service_rx) = mpsc::channel(10);
let this = Arc::new(Self {
node: Service::new("node", manager.clone())
.expect("Hardcoded service name will never be a duplicate!"),
libraries: LibraryServices::new(register_service_tx),
events: broadcast::channel(100),
manager,
spacedrop_pairing_reqs: Default::default(),
spacedrop_cancelations: Default::default(),
node_config_manager: node_config,
});
this.update_metadata().await;
tokio::spawn(LibraryServices::start(this.clone(), libraries));
Ok((
this.clone(),
P2PManagerActor {
manager: this,
stream,
register_service_rx,
},
))
}
pub fn get_library_service(&self, library_id: &Uuid) -> Option<Arc<Service<LibraryMetadata>>> {
self.libraries.get(library_id)
}
pub async fn update_metadata(&self) {
self.node.update({
let config = self.node_config_manager.get().await;
PeerMetadata {
name: config.name.clone(),
operating_system: Some(OperatingSystem::get_os()),
device_model: Some(get_hardware_model_name().unwrap_or(HardwareModel::Other)),
version: Some(env!("CARGO_PKG_VERSION").to_string()),
}
});
}
pub fn subscribe(&self) -> broadcast::Receiver<P2PEvent> {
self.events.0.subscribe()
}
// TODO: Replace this with a better system that is more built into `sd-p2p` crate
pub fn state(&self) -> P2PState {
let (
self_peer_id,
self_identity,
config,
manager_connected,
manager_connections,
dicovery_services,
discovery_discovered,
discovery_known,
) = self.manager.get_debug_state();
P2PState {
node: self.node.get_state(),
libraries: self
.libraries
.libraries()
.into_iter()
.map(|(id, lib)| (id, lib.get_state()))
.collect(),
self_peer_id: PeerId(self_peer_id),
self_identity,
config,
manager_connected: manager_connected
.into_iter()
.map(|(k, v)| (PeerId(k), v))
.collect(),
manager_connections: manager_connections.into_iter().map(PeerId).collect(),
dicovery_services,
discovery_discovered: discovery_discovered
.into_iter()
.map(|(k, v)| {
(
k,
v.into_iter()
.map(|(k, (k1, v, b))| (k, (PeerId(k1), v, b)))
.collect(),
)
})
.collect(),
discovery_known,
}
}
pub async fn shutdown(&self) {
self.manager.shutdown().await;
}
}
#[derive(Debug, Serialize, Type)]
#[allow(clippy::type_complexity)]
pub struct P2PState {
node: HashMap<RemoteIdentity, PeerStatus>,
libraries: Vec<(Uuid, HashMap<RemoteIdentity, PeerStatus>)>,
self_peer_id: PeerId,
self_identity: RemoteIdentity,
config: ManagerConfig,
manager_connected: HashMap<PeerId, RemoteIdentity>,
manager_connections: HashSet<PeerId>,
dicovery_services: HashMap<String, Option<HashMap<String, String>>>,
discovery_discovered: HashMap<
String,
HashMap<RemoteIdentity, (PeerId, HashMap<String, String>, Vec<SocketAddr>)>,
>,
discovery_known: HashMap<String, HashSet<RemoteIdentity>>,
}
// TODO: Get this back into `sd-p2p` but keep it private
#[derive(Debug, Serialize, Type, Hash, Eq, PartialEq, Ord, PartialOrd, Clone)]
pub struct PeerId(#[specta(type = String)] sd_p2p::internal::PeerId);

View file

@ -1,131 +0,0 @@
use crate::Node;
use sd_p2p::{spacetunnel::Tunnel, Event, ManagerStream, Service, ServiceEvent};
use std::sync::Arc;
use futures::StreamExt;
use tokio::sync::mpsc;
use tracing::error;
use super::{operations, sync::SyncMessage, Header, LibraryMetadata, P2PEvent, P2PManager};
pub struct P2PManagerActor {
pub(super) manager: Arc<P2PManager>,
pub(super) stream: ManagerStream,
pub(super) register_service_rx: mpsc::Receiver<Arc<Service<LibraryMetadata>>>,
}
impl P2PManagerActor {
pub fn start(self, node: Arc<Node>) {
let Self {
manager: this,
mut stream,
mut register_service_rx,
} = self;
tokio::spawn({
async move {
let mut node_rx = this.node.listen();
loop {
tokio::select! {
// TODO: We ignore the response of this but I suspect it will be useful in the future so it stays for now.
Some(_event) = register_service_rx.recv() => {},
// TODO: We should subscribe to library-level events too but frontend isn't cut out for them right now.
Some(Ok(event)) = node_rx.next() => {
this.events.0
.send(match event {
ServiceEvent::Discovered { identity, metadata } =>
P2PEvent::DiscoveredPeer {
identity,
metadata,
},
ServiceEvent::Expired { identity } =>
P2PEvent::ExpiredPeer {
identity,
},
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Some(event) = stream.next() => {
match event {
Event::PeerConnected(event) => {
this.events
.0
.send(P2PEvent::ConnectedPeer {
identity: event.identity,
})
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Event::PeerDisconnected(identity) => {
this.events
.0
.send(P2PEvent::DisconnectedPeer { identity })
.map_err(|_| error!("Failed to send event to p2p event stream!"))
.ok();
}
Event::PeerMessage(mut event) => {
let this = this.clone();
let node = node.clone();
tokio::spawn(async move {
let header = Header::from_stream(&mut event.stream)
.await
.map_err(|err| {
error!("Failed to read header from stream: {}", err);
})?;
match header {
Header::Ping => operations::ping::reciever(event).await,
Header::Spacedrop(req) => {
operations::spacedrop::reciever(&this, req, event).await?
}
Header::Sync(library_id) => {
let mut tunnel =
Tunnel::responder(event.stream).await.map_err(|err| {
error!("Failed `Tunnel::responder`: {}", err);
})?;
let msg =
SyncMessage::from_stream(&mut tunnel).await.map_err(|err| {
error!("Failed `SyncMessage::from_stream`: {}", err);
})?;
let library =
node.libraries.get_library(&library_id).await.ok_or_else(|| {
error!("Failed to get library '{library_id}'");
// TODO: Respond to remote client with warning!
})?;
match msg {
SyncMessage::NewOperations => {
super::sync::responder(&mut tunnel, library).await?;
}
};
}
Header::File(req) => {
operations::request_file::receiver(&node, req, event).await?;
}
}
Ok::<_, ()>(())
});
}
Event::Shutdown => break,
_ => {}
}
}
}
}
error!(
"Manager event stream closed! The core is unstable from this point forward!"
);
}
});
}
}

View file

@ -1,8 +1,5 @@
use sd_p2p::{
proto::{decode, encode},
spaceblock::{Range, SpaceblockRequests, SpaceblockRequestsError},
};
use sd_p2p_block::{Range, SpaceblockRequests, SpaceblockRequestsError};
use sd_p2p_proto::{decode, encode};
use thiserror::Error;
use tokio::io::{AsyncRead, AsyncReadExt};
use uuid::Uuid;

View file

@ -5,10 +5,7 @@ use crate::{
sync::{self, GetOpsArgs},
};
use sd_p2p::{
proto::{decode, encode},
spacetunnel::Tunnel,
};
use sd_p2p_proto::{decode, encode};
use sd_sync::CRDTOperation;
use std::sync::Arc;
@ -17,16 +14,18 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tracing::*;
use uuid::Uuid;
use super::{Header, P2PManager};
use super::P2PManager;
mod proto;
pub use proto::*;
pub use originator::run as originator;
mod originator {
use crate::p2p::Header;
use super::*;
use responder::tx as rx;
use sd_p2p::PeerStatus;
use sd_p2p_tunnel::Tunnel;
pub mod tx {
use super::*;
@ -84,28 +83,19 @@ mod originator {
/// REMEMBER: This only syncs one direction!
pub async fn run(library_id: Uuid, sync: &Arc<sync::Manager>, p2p: &Arc<super::P2PManager>) {
let service = p2p.get_library_service(&library_id).unwrap();
// TODO: Deduplicate any duplicate peer ids -> This is an edge case but still
for (remote_identity, status) in service.get_state() {
let PeerStatus::Connected = status else {
for (remote_identity, peer) in p2p.get_library_instances(&library_id) {
if !peer.is_connected() {
continue;
};
let sync = sync.clone();
let p2p = p2p.clone();
let service = service.clone();
tokio::spawn(async move {
debug!(
"Alerting peer '{remote_identity:?}' of new sync events for library '{library_id:?}'"
);
let mut stream = service
.connect(p2p.manager.clone(), &remote_identity)
.await
.map_err(|_| ())
.unwrap(); // TODO: handle providing incorrect peer id
let mut stream = peer.new_stream().await.unwrap();
stream
.write_all(&Header::Sync(library_id).to_bytes())

View file

@ -1,4 +1,4 @@
use sd_p2p::proto::decode;
use sd_p2p_proto::decode;
use tokio::io::{AsyncRead, AsyncReadExt};
// will probs have more variants in future

View file

@ -14,6 +14,11 @@ pub enum MaybeUndefined<T> {
}
impl<T> MaybeUndefined<T> {
// `Undefined` will return `true` else `false`.
pub fn is_undefined(&self) -> bool {
matches!(self, Self::Undefined)
}
// `Null | Value(T)` will return `true` else `false`.
pub fn is_defined(&self) -> bool {
!matches!(self, Self::Undefined)

View file

@ -6,7 +6,7 @@ edition.workspace = true
repository.workspace = true
[dependencies]
sd-p2p = { path = "../p2p" }
sd-p2p2 = { path = "../p2p2" }
reqwest = "0.11.22"
serde.workspace = true
serde_json.workspace = true

View file

@ -3,7 +3,7 @@ pub mod auth;
use std::{future::Future, sync::Arc};
use auth::OAuthToken;
use sd_p2p::spacetunnel::RemoteIdentity;
use sd_p2p2::RemoteIdentity;
use serde::{Deserialize, Serialize};
use serde_json::json;
use specta::Type;
@ -47,8 +47,11 @@ pub struct Instance {
pub id: String,
pub uuid: Uuid,
pub identity: RemoteIdentity,
#[serde(rename = "nodeId")]
pub node_id: Uuid,
#[serde(rename = "nodeName")]
pub node_name: String,
#[serde(rename = "nodePlatform")]
pub node_platform: u8,
}
@ -197,10 +200,11 @@ pub mod library {
use super::*;
#[derive(Debug, Deserialize)]
pub struct Response {
pub struct CreateResult {
pub id: String,
}
#[allow(clippy::too_many_arguments)]
pub async fn exec(
config: RequestConfig,
library_id: Uuid,
@ -210,7 +214,7 @@ pub mod library {
node_id: Uuid,
node_name: &str,
node_platform: u8,
) -> Result<Response, Error> {
) -> Result<CreateResult, Error> {
let Some(auth_token) = config.auth_token else {
return Err(Error("Authentication required".to_string()));
};

View file

@ -0,0 +1,15 @@
[package]
name = "sd-p2p-block"
version = "0.1.0"
authors = ["Oscar Beaumont <oscar@otbeaumont.me>"]
license.workspace = true
edition.workspace = true
repository.workspace = true
[dependencies]
sd-p2p2 = { path = "../p2p2" }
sd-p2p-proto = { path = "../p2p-proto" }
thiserror.workspace = true
tokio.workspace = true
tracing.workspace = true
uuid.workspace = true

View file

@ -58,7 +58,7 @@ impl<'a> Block<'a> {
mod tests {
use std::io::Cursor;
use crate::spaceblock::BlockSize;
use crate::BlockSize;
use super::*;

View file

@ -1,3 +1,6 @@
//! TODO
// TODO: Clippy lints here
//! Spaceblock is a file transfer protocol that uses a block based system to transfer files.
//! This protocol is modelled after `SyncThing`'s BEP protocol. A huge thanks to its original authors!
//! You can read more about it here: <https://docs.syncthing.net/specs/bep-v1.html>
@ -21,10 +24,8 @@ use tokio::{
};
use tracing::debug;
use crate::{
proto::{decode, encode},
spacetime::UnicastStream,
};
use sd_p2p2::UnicastStream;
use sd_p2p_proto::{decode, encode};
mod block;
mod block_size;

View file

@ -4,7 +4,7 @@ use thiserror::Error;
use tokio::io::{AsyncRead, AsyncReadExt};
use uuid::Uuid;
use crate::proto::{decode, encode};
use sd_p2p_proto::{decode, encode};
use super::BlockSize;

View file

@ -0,0 +1,13 @@
[package]
name = "sd-p2p-proto"
version = "0.1.0"
authors = ["Oscar Beaumont <oscar@otbeaumont.me>"]
license.workspace = true
edition.workspace = true
repository.workspace = true
[dependencies]
ed25519-dalek = "2.1.0"
thiserror.workspace = true
tokio = { workspace = true, features = ["io-util"] }
uuid.workspace = true

View file

@ -2,11 +2,22 @@
//!
//! Eventually these will be deprecated by macros but I can't find one which supports large payloads (basically it needs to write to an async stream, not an in-memory buffer) -> Binario is my own prototype of a Rust library to do this but it's not prod ready yet.
//!
use thiserror::Error;
use uuid::Uuid;
// TODO: Remove this from this crate cause it's a leak of responsibility.
#[derive(Debug, Error)]
#[error(transparent)]
pub enum SpaceTunnelIdentityErr {
#[error("{0}")]
Darlek(#[from] ed25519_dalek::ed25519::Error),
#[error("Invalid key length")]
InvalidKeyLength,
}
pub mod decode {
use crate::spacetunnel::IdentityErr;
use crate::SpaceTunnelIdentityErr;
use super::{Error, Uuid};
use tokio::io::{AsyncRead, AsyncReadExt};
@ -20,7 +31,7 @@ pub mod decode {
#[error("NameFormatError({0})")]
NameFormatError(#[from] std::string::FromUtf8Error),
#[error("InvalidRemoteIdentity({0})")]
InvalidRemoteIdentity(#[from] IdentityErr),
InvalidRemoteIdentity(#[from] SpaceTunnelIdentityErr),
}
/// Deserialize uuid as it's fixed size data.

View file

@ -0,0 +1,11 @@
[package]
name = "sd-p2p-tunnel"
version = "0.1.0"
authors = ["Oscar Beaumont <oscar@otbeaumont.me>"]
license.workspace = true
edition.workspace = true
repository.workspace = true
[dependencies]
sd-p2p2 = { path = "../p2p2" }
tokio = { workspace = true, features = ["io-util"] }

View file

@ -1,7 +1,6 @@
//! A system for creating encrypted tunnels between peers over untrusted connections.
mod identity;
mod tunnel;
pub use identity::*;
pub use sd_p2p2::{Identity, IdentityErr, RemoteIdentity};
pub use tunnel::*;

View file

@ -6,7 +6,7 @@ use std::{
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use crate::spacetime::UnicastStream;
use sd_p2p2::UnicastStream;
#[derive(Debug)]
pub struct Tunnel {

View file

@ -1,141 +0,0 @@
// use std::{collections::HashMap, env, time::Duration};
// use sd_p2p::{Event, Keypair, Manager, Metadata};
// use tokio::{io::AsyncReadExt, time::sleep};
// use tracing::{debug, error, info};
// #[derive(Debug, Clone)]
// pub struct PeerMetadata {
// name: String,
// }
// impl Metadata for PeerMetadata {
// fn to_hashmap(self) -> HashMap<String, String> {
// HashMap::from([("name".to_owned(), self.name)])
// }
// fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
// where
// Self: Sized,
// {
// Ok(Self {
// name: data
// .get("name")
// .ok_or_else(|| {
// "DNS record for field 'name' missing. Unable to decode 'PeerMetadata'!"
// .to_owned()
// })?
// .to_owned(),
// })
// }
// }
// #[tokio::main]
// async fn main() {
// tracing_subscriber::fmt()
// .with_env_filter(
// tracing_subscriber::EnvFilter::from_default_env()
// .add_directive("basic=trace".parse().unwrap())
// .add_directive("sd-p2p=trace".parse().unwrap())
// .add_directive("info".parse().unwrap()),
// )
// .try_init()
// .unwrap();
// let keypair = Keypair::generate();
// let metadata_manager = MetadataManager::new(PeerMetadata {
// name: "TODO".to_string(),
// });
// let (manager, mut stream) = Manager::new("p2p-demo", &keypair, Default::default())
// .await
// .unwrap();
// info!(
// "Node '{}' is now online listening at addresses: {:?}",
// manager.identity(),
// stream.listen_addrs()
// );
// tokio::spawn(async move {
// let mut shutdown = false;
// // Your application must keep polling this stream to keep the P2P system running
// while let Some(event) = stream.next().await {
// match event {
// // TODO: Refactor example to use `Service` struct
// // Event::PeerDiscovered(event) => {
// // println!(
// // "Discovered peer by id '{}' with address '{:?}' and metadata: {:?}",
// // event.peer_id, event.addresses, event.metadata
// // );
// // event.dial().await; // We connect to everyone we find on the network. Your app will probs wanna restrict this!
// // }
// Event::PeerMessage(mut event) => {
// debug!("Peer '{}' established unicast stream", event.identity);
// tokio::spawn(async move {
// let mut buf = [0; 100];
// let n = event.stream.read(&mut buf).await.unwrap();
// println!("GOT UNICAST: {:?}", std::str::from_utf8(&buf[..n]).unwrap());
// });
// }
// Event::PeerBroadcast(mut event) => {
// debug!("Peer '{}' established broadcast stream", event.identity);
// tokio::spawn(async move {
// let mut buf = [0; 100];
// let n = event.stream.read(&mut buf).await.unwrap();
// println!(
// "GOT BROADCAST: {:?}",
// std::str::from_utf8(&buf[..n]).unwrap()
// );
// });
// }
// Event::Shutdown => {
// info!("Manager shutdown!");
// shutdown = true;
// break;
// }
// _ => debug!("event: {:?}", event),
// }
// }
// if !shutdown {
// error!("Manager event stream closed! The core is unstable from this point forward!");
// // process.exit(1); // TODO: Should I?
// }
// });
// if env::var("PING").as_deref() != Ok("skip") {
// let manager = manager.clone();
// tokio::spawn(async move {
// sleep(Duration::from_millis(500)).await;
// // Send pings to every client every 3 second after startup
// loop {
// sleep(Duration::from_secs(3)).await;
// manager
// .broadcast(
// format!("Hello World From {}", keypair.peer_id())
// .as_bytes()
// .to_vec(),
// )
// .await;
// debug!("Sent ping broadcast to all connected peers!");
// }
// });
// }
// // TODO: proper shutdown
// // https://docs.rs/ctrlc/latest/ctrlc/
// // https://docs.rs/system_shutdown/latest/system_shutdown/
// tokio::time::sleep(Duration::from_secs(100)).await;
// manager.shutdown().await; // It is super highly recommended to shutdown the manager before exiting your application so an Mdns update can be broadcasted
// }
fn main() {
todo!("TODO: Update example");
}

View file

@ -1,156 +0,0 @@
use std::{
collections::{HashMap, HashSet},
future::poll_fn,
net::SocketAddr,
sync::{Arc, PoisonError, RwLock},
task::Poll,
};
use libp2p::PeerId;
use tokio::sync::{broadcast, mpsc};
use tracing::trace;
use crate::{spacetunnel::RemoteIdentity, ManagerConfig, Mdns, ServiceEventInternal};
type ServiceName = String;
pub type ListenAddrs = HashSet<SocketAddr>;
pub type State = Arc<RwLock<DiscoveryManagerState>>;
/// `DiscoveryManager` controls all user-defined [Service]'s and connects them with the network through mDNS and other discovery protocols
pub struct DiscoveryManager {
pub(crate) state: State,
pub(crate) listen_addrs: ListenAddrs,
pub(crate) application_name: &'static str,
pub(crate) identity: RemoteIdentity,
pub(crate) peer_id: PeerId,
pub(crate) mdns: Option<Mdns>,
// TODO: Split these off `DiscoveryManagerState` and parse around on their own struct???
pub(crate) do_broadcast_rx: broadcast::Receiver<()>,
pub(crate) service_shutdown_rx: mpsc::Receiver<String>,
}
impl DiscoveryManager {
pub(crate) fn new(
application_name: &'static str,
identity: RemoteIdentity,
peer_id: PeerId,
config: &ManagerConfig,
state: State,
service_shutdown_rx: mpsc::Receiver<String>,
) -> Result<Self, mdns_sd::Error> {
let mut mdns = None;
if config.enabled {
mdns = Some(Mdns::new(application_name, identity, peer_id)?);
}
let do_broadcast_rx = state
.read()
.unwrap_or_else(PoisonError::into_inner)
.do_broadcast
.subscribe();
Ok(Self {
state,
listen_addrs: Default::default(),
application_name,
identity,
peer_id,
mdns,
do_broadcast_rx,
service_shutdown_rx,
})
}
/// is called on changes to `self.services` to make sure all providers update their records
pub(crate) fn do_advertisement(&mut self) {
trace!("Broadcasting new service records");
if let Some(mdns) = &mut self.mdns {
mdns.do_advertisement(&self.listen_addrs, &self.state);
}
}
pub(crate) async fn poll(&mut self) {
tokio::select! {
_ = self.do_broadcast_rx.recv() => self.do_advertisement(),
service_name = self.service_shutdown_rx.recv() => {
if let Some(service_name) = service_name {
let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner);
state.services.remove(&service_name);
state.discovered.remove(&service_name);
state.known.remove(&service_name);
}
// TODO
self.do_advertisement();
}
() = poll_fn(|cx| {
if let Some(mdns) = &mut self.mdns {
return mdns.poll(cx, &self.listen_addrs, &self.state);
}
Poll::Pending
}) => {},
}
}
pub(crate) fn shutdown(&self) {
if let Some(mdns) = &self.mdns {
mdns.shutdown();
}
}
}
#[derive(Debug, Clone)]
#[allow(clippy::type_complexity)]
pub struct DiscoveryManagerState {
/// A list of services the current node is advertising w/ their metadata
pub(crate) services: HashMap<
ServiceName,
(
broadcast::Sender<(String, ServiceEventInternal)>,
// Will be `None` prior to the first `.set` call
Option<HashMap<String, String>>,
),
>,
/// A map of organically discovered peers
pub(crate) discovered: HashMap<ServiceName, HashMap<RemoteIdentity, DiscoveredPeerCandidate>>,
/// A map of peers we know about. These may be connected or not avaiable.
/// This is designed around the Relay/NAT hole punching service where we need to emit who we wanna discover
/// Note: this may contain duplicates with `discovered` as they will *not* be removed from here when found
pub(crate) known: HashMap<ServiceName, HashSet<RemoteIdentity>>,
/// Used to trigger an rebroadcast. This should be called when mutating this struct.
/// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it.
/// This is a channel with a capacity of 1. If sending fails we know someone else has already requested broadcast and we can ignore the error.
pub(crate) do_broadcast: broadcast::Sender<()>,
/// Used to trigger the removal of a `Service`. This is used in the `impl Drop for Service`
/// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it.
pub(crate) service_shutdown_tx: mpsc::Sender<String>,
}
impl DiscoveryManagerState {
#[must_use]
pub fn new() -> (Arc<RwLock<Self>>, mpsc::Receiver<String>) {
let (service_shutdown_tx, service_shutdown_rx) = mpsc::channel(10);
(
Arc::new(RwLock::new(Self {
services: Default::default(),
discovered: Default::default(),
known: Default::default(),
do_broadcast: broadcast::channel(1).0,
service_shutdown_tx,
})),
service_shutdown_rx,
)
}
}
#[derive(Debug, Clone)]
pub struct DiscoveredPeerCandidate {
pub(crate) peer_id: PeerId,
pub(crate) meta: HashMap<String, String>,
pub(crate) addresses: Vec<SocketAddr>,
}

View file

@ -1,367 +0,0 @@
use std::{
collections::HashMap,
net::SocketAddr,
pin::Pin,
str::FromStr,
sync::PoisonError,
task::{Context, Poll},
thread::sleep,
time::Duration,
};
use futures_core::Stream;
use libp2p::{
futures::{FutureExt, StreamExt},
PeerId,
};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use streamunordered::{StreamUnordered, StreamYield};
use tokio::time::{sleep_until, Instant, Sleep};
use tracing::{error, trace, warn};
use crate::{
spacetunnel::RemoteIdentity, DiscoveredPeerCandidate, ListenAddrs, ServiceEventInternal, State,
};
/// TODO
const MDNS_READVERTISEMENT_INTERVAL: Duration = Duration::from_secs(60); // Every minute re-advertise
pub struct Mdns {
identity: RemoteIdentity,
peer_id: PeerId,
service_name: String,
advertised_services: Vec<String>,
mdns_daemon: ServiceDaemon,
next_mdns_advertisement: Pin<Box<Sleep>>,
// This is an ugly workaround for: https://github.com/keepsimple1/mdns-sd/issues/145
mdns_rx: StreamUnordered<MdnsRecv>,
// This is hacky but it lets us go from service name back to `RemoteIdentity` when removing the service.
// During service removal we only have the service name (not metadata) but during service discovery we insert into this map.
tracked_services: HashMap<String /* Service FQDN */, TrackedService>,
}
struct TrackedService {
service_name: String,
identity: RemoteIdentity,
}
impl Mdns {
pub(crate) fn new(
application_name: &'static str,
identity: RemoteIdentity,
peer_id: PeerId,
) -> Result<Self, mdns_sd::Error> {
let mdns_daemon = ServiceDaemon::new()?;
Ok(Self {
identity,
peer_id,
service_name: format!("_{application_name}._udp.local."),
advertised_services: Vec::new(),
mdns_daemon,
next_mdns_advertisement: Box::pin(sleep_until(Instant::now())), // Trigger an advertisement immediately
mdns_rx: StreamUnordered::new(),
tracked_services: HashMap::new(),
})
}
/// Do an mdns advertisement to the network.
pub(super) fn do_advertisement(&mut self, listen_addrs: &ListenAddrs, state: &State) {
trace!("doing mDNS advertisement!");
// TODO: Second stage rate-limit
let mut ports_to_service = HashMap::new();
for addr in listen_addrs {
ports_to_service
.entry(addr.port())
.or_insert_with(Vec::new)
.push(addr.ip());
}
// This method takes `&mut self` so we know we have exclusive access to `advertised_services`
let mut advertised_services_to_remove = self.advertised_services.clone();
let state = state.read().unwrap_or_else(PoisonError::into_inner);
for (port, ips) in ports_to_service {
for (service_name, (_, metadata)) in &state.services {
let Some(metadata) = metadata else {
continue;
};
let mut meta = metadata.clone();
meta.insert("__peer_id".into(), self.peer_id.to_string());
meta.insert("__service".into(), service_name.to_string());
meta.insert("__identity".into(), self.identity.to_string());
// The max length of an MDNS record is painful so we just hash the data to come up with a pseudo-random but deterministic value.
// The full values are stored within TXT records.
let my_name = String::from_utf8_lossy(&base91::slice_encode(
sha256::digest(format!("{}_{}", service_name, self.identity)).as_bytes(),
))[..63]
.to_string();
let service_domain = format!("_{service_name}._sub.{}", self.service_name);
let service = match ServiceInfo::new(
&service_domain,
&my_name[..63], // 63 as long as the mDNS spec will allow us
&format!("{}.{}.", service_name, self.identity), // TODO: Should this change???
&*ips,
port,
Some(meta.clone()), // TODO: Prevent the user defining a value that overflows a DNS record
) {
Ok(service) => service, // TODO: .enable_addr_auto(), // TODO: using autoaddrs or not???
Err(err) => {
warn!("error creating mdns service info: {}", err);
continue;
}
};
let service_name = service.get_fullname().to_string();
advertised_services_to_remove.retain(|s| *s != service_name);
self.advertised_services.push(service_name);
if !self
.mdns_rx
.iter_with_token()
.any(|(s, _)| s.1 == service_domain)
{
let service = match self.mdns_daemon.browse(&service_domain) {
Ok(v) => v,
Err(err) => {
error!("error browsing mdns service: {}", err);
return;
}
};
self.mdns_rx
.insert(MdnsRecv(service.into_stream(), service_domain));
}
// TODO: Do a proper diff and remove old services
trace!("advertising mdns service: {:?}", service);
match self.mdns_daemon.register(service) {
Ok(()) => {}
Err(err) => warn!("error registering mdns service: {}", err),
}
}
}
for service_domain in advertised_services_to_remove {
if let Some((_, token)) = self
.mdns_rx
.iter_with_token()
.find(|(s, _)| s.1 == service_domain)
{
Pin::new(&mut self.mdns_rx).remove(token);
}
if let Err(err) = self.mdns_daemon.unregister(&service_domain) {
warn!("error unregistering mdns service: {}", err);
}
}
// If mDNS advertisement is not queued in future, queue one
if self.next_mdns_advertisement.is_elapsed() {
self.next_mdns_advertisement =
Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL));
}
}
pub(crate) fn poll(
&mut self,
cx: &mut Context<'_>,
listen_addrs: &ListenAddrs,
state: &State,
) -> Poll<()> {
let mut is_pending = false;
while !is_pending {
match self.next_mdns_advertisement.poll_unpin(cx) {
Poll::Ready(()) => self.do_advertisement(listen_addrs, state),
Poll::Pending => is_pending = true,
}
match self.mdns_rx.poll_next_unpin(cx) {
Poll::Ready(Some((result, _))) => match result {
StreamYield::Item(event) => self.on_event(event, state),
StreamYield::Finished(_) => {}
},
Poll::Ready(None) => {}
Poll::Pending => is_pending = true,
}
}
Poll::Pending
}
fn on_event(&mut self, event: ServiceEvent, state: &State) {
match event {
ServiceEvent::SearchStarted(_) => {}
ServiceEvent::ServiceFound(_, _) => {}
ServiceEvent::ServiceResolved(info) => {
let Some(service_name) = info.get_properties().get("__service") else {
warn!(
"resolved mDNS peer advertising itself with missing '__service' metadata"
);
return;
};
let service_name = service_name.val_str();
let Some(identity) = info.get_properties().get("__identity") else {
warn!(
"resolved mDNS peer advertising itself with missing '__identity' metadata"
);
return;
};
let identity = identity.val_str();
println!("\t {:?} {:?}", info.get_fullname(), self.service_name); // TODO
// if !service_type.ends_with(&self.service_name) {
// warn!(
// "resolved mDNS peer advertising itself with invalid service type '{service_type}'"
// );
// return;
// }
let Ok(identity) = RemoteIdentity::from_str(identity) else {
warn!("resolved peer advertising itself with an invalid RemoteIdentity('{identity}')");
return;
};
// Prevent discovery of the current peer.
if identity == self.identity {
return;
}
self.tracked_services.insert(
info.get_fullname().to_string(),
TrackedService {
service_name: service_name.to_string(),
identity,
},
);
let mut meta = info
.get_properties()
.iter()
.map(|v| (v.key().to_owned(), v.val_str().to_owned()))
.collect::<HashMap<_, _>>();
let Some(peer_id) = meta.remove("__peer_id") else {
warn!(
"resolved mDNS peer advertising itself with missing '__peer_id' metadata"
);
return;
};
let Ok(peer_id) = PeerId::from_str(&peer_id) else {
warn!(
"resolved mDNS peer advertising itself with invalid '__peer_id' metadata"
);
return;
};
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if let Some((tx, _)) = state.services.get_mut(service_name) {
if let Err(err) = tx.send((
service_name.to_string(),
ServiceEventInternal::Discovered {
identity,
metadata: meta.clone(),
},
)) {
warn!(
"error sending mDNS service event to '{service_name}' channel: {err}"
);
}
} else {
warn!(
"mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!"
);
}
if let Some(discovered) = state.discovered.get_mut(service_name) {
discovered.insert(
identity,
DiscoveredPeerCandidate {
peer_id,
meta,
addresses: info
.get_addresses()
.iter()
.map(|addr| SocketAddr::new(*addr, info.get_port()))
.collect(),
},
);
} else {
warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!");
}
}
ServiceEvent::ServiceRemoved(_, fullname) => {
let Some(TrackedService {
service_name,
identity,
}) = self.tracked_services.remove(&fullname)
else {
warn!(
"resolved mDNS peer deadvertising itself without having been discovered!"
);
return;
};
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if let Some((tx, _)) = state.services.get_mut(&service_name) {
if let Err(err) = tx.send((
service_name.to_string(),
ServiceEventInternal::Expired { identity },
)) {
warn!("error sending mDNS service event '{service_name}': {err}");
}
} else {
warn!(
"mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!"
);
}
if let Some(discovered) = state.discovered.get_mut(&service_name) {
discovered.remove(&identity);
} else {
warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!");
}
}
ServiceEvent::SearchStopped(_) => {}
}
}
pub(crate) fn shutdown(&self) {
for service in &self.advertised_services {
self.mdns_daemon
.unregister(service)
.map_err(|err| {
error!("error removing mdns service '{service}': {err}");
})
.ok();
}
// TODO: Without this mDNS is not sending it goodbye packets without a timeout. Try and remove this cause it makes shutdown slow.
sleep(Duration::from_millis(100));
match self.mdns_daemon.shutdown() {
Ok(chan) => {
let _ = chan.recv();
}
Err(err) => {
error!("error shutting down mdns daemon: {err}");
}
}
}
}
struct MdnsRecv(flume::r#async::RecvStream<'static, ServiceEvent>, String);
impl Stream for MdnsRecv {
type Item = ServiceEvent;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.0.poll_next_unpin(cx)
}
}

View file

@ -1,7 +0,0 @@
mod manager;
mod mdns;
mod service;
pub use manager::*;
pub use mdns::*;
pub use service::*;

View file

@ -1,311 +0,0 @@
use std::{
collections::HashMap,
marker::PhantomData,
pin::Pin,
sync::{Arc, PoisonError, RwLock},
task::{Context, Poll},
};
use futures_core::Stream;
use libp2p::futures::StreamExt;
use pin_project_lite::pin_project;
use thiserror::Error;
use tokio::sync::{broadcast, mpsc};
use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream};
use tracing::warn;
use crate::{
spacetime::{UnicastStream, UnicastStreamError},
spacetunnel::RemoteIdentity,
DiscoveredPeer, DiscoveryManagerState, Manager, Metadata,
};
/// A Service represents a thing your application exposes to the network that can be discovered and connected to.
pub struct Service<TMeta> {
name: String,
state: Arc<RwLock<DiscoveryManagerState>>,
do_broadcast: broadcast::Sender<()>,
service_shutdown_tx: mpsc::Sender<String>,
manager: Arc<Manager>,
phantom: PhantomData<fn() -> TMeta>,
}
impl<TMeta: Metadata> Service<TMeta> {
// Construct a new service. This will not cause an advertisement until [Self::update] is called!
pub fn new(
name: impl Into<String>,
manager: Arc<Manager>,
) -> Result<Self, ErrDuplicateServiceName> {
let name = name.into();
let state = manager.discovery_state.clone();
let (do_broadcast, service_shutdown_tx) = {
let mut state = state.write().unwrap_or_else(PoisonError::into_inner);
if state.services.contains_key(&name) {
return Err(ErrDuplicateServiceName);
}
state.discovered.insert(name.clone(), Default::default());
state
.services
.insert(name.clone(), (broadcast::channel(20).0, Default::default()));
(
state.do_broadcast.clone(),
state.service_shutdown_tx.clone(),
)
};
// TODO: We call this but it won't have metadata set so it won't actually expose it
// However, it must be called to properly setup the listener (at least right now)
do_broadcast.send(()).ok();
Ok(Self {
name,
state,
do_broadcast,
service_shutdown_tx,
manager,
phantom: PhantomData,
})
}
#[must_use]
pub fn name(&self) -> &str {
&self.name
}
pub fn update(&self, meta: TMeta) {
if let Some((_, services_meta)) = self
.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.services
.get_mut(&self.name)
{
let meta = meta.to_hashmap();
let did_change = services_meta.as_ref().is_some_and(|v| *v == meta);
*services_meta = Some(meta);
if did_change {
self.do_broadcast.send(()).ok();
}
} else {
warn!(
"Service::update called on non-existent service '{}'. This indicates a major bug in P2P!",
self.name
);
}
}
pub fn get_state(&self) -> HashMap<RemoteIdentity, PeerStatus> {
let connected = self
.manager
.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.connected
.values()
.map(|remote_identity| (*remote_identity, PeerStatus::Connected))
.collect::<Vec<_>>();
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
state
.known
.get(&self.name)
.into_iter()
.flatten()
.map(|remote_identity| (*remote_identity, PeerStatus::Unavailable))
// We do these after the `Unavailable` to replace the keys that are in both
.chain(connected)
.chain(
state
.discovered
.get(&self.name)
.into_iter()
.flatten()
.map(|(remote_identity, _)| (*remote_identity, PeerStatus::Discovered)),
)
.collect::<HashMap<RemoteIdentity, PeerStatus>>()
}
pub fn add_known(&self, identity: Vec<RemoteIdentity>) {
self.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.known
.entry(self.name.clone())
.or_default()
.extend(identity);
// TODO: Probally signal to discovery manager that we have new known peers -> This will be need for Relay but not for mDNS
}
// TODO: Remove in favor of `get_state` maybe???
pub fn get_discovered(&self) -> Vec<DiscoveredPeer<TMeta>> {
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.discovered
.get(&self.name)
.into_iter()
.flatten()
.filter_map(|(i, p)| {
let metadata = match TMeta::from_hashmap(&p.meta) {
Ok(m) => m,
Err(err) => {
warn!("Failed to deserialize metadata for peer '{i:?}': {err}");
return None;
}
};
Some(DiscoveredPeer {
identity: *i,
peer_id: p.peer_id,
metadata,
addresses: p.addresses.clone(),
})
})
.collect::<Vec<_>>()
}
pub async fn connect(
&self,
manager: Arc<Manager>,
identity: &RemoteIdentity,
) -> Result<UnicastStream, UnicastStreamError> {
let candidate = {
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
let (_, candidate) = state
.discovered
.get(&self.name)
.ok_or(UnicastStreamError::ErrPeerIdNotFound(*identity))?
.iter()
.find(|(i, _)| *i == identity)
.ok_or(UnicastStreamError::ErrPeerIdNotFound(*identity))?;
candidate.clone()
};
let stream = manager.stream_inner(candidate.peer_id).await?; // TODO: handle providing incorrect peer id
Ok(stream)
}
#[allow(clippy::panic)] // This is a `.expect` (which is allowd) but with formatting
pub fn listen(&self) -> ServiceSubscription<TMeta> {
ServiceSubscription {
name: self.name.clone(),
rx: BroadcastStream::new(
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.services
.get(&self.name)
.unwrap_or_else(|| panic!("Service '{}' not found in service map", self.name))
.0
.subscribe(),
),
phantom: PhantomData,
}
}
}
impl<Meta> Drop for Service<Meta> {
fn drop(&mut self) {
if self
.service_shutdown_tx
.try_send(self.name.clone())
.is_err()
{
// TODO: This will happen on shutdown due to the shutdown order. Try and fix that!
// Functionally all services are shutdown by the manager so this is a cosmetic fix.
warn!(
"Service::drop could not be called on '{}'. This indicates contention on the service shutdown channel and will result in out-of-date services being broadcasted.",
self.name
);
}
}
}
#[derive(Debug, Error)]
#[error("a service has already been mounted with this name")]
pub struct ErrDuplicateServiceName;
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "specta", derive(specta::Type))]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub enum PeerStatus {
Unavailable,
Discovered,
Connected,
}
#[derive(Debug, Clone)]
#[cfg_attr(feature = "specta", derive(specta::Type))]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub enum ServiceEvent<TMeta> {
Discovered {
identity: RemoteIdentity,
metadata: TMeta,
},
Expired {
identity: RemoteIdentity,
},
}
// Type-erased version of [ServiceEvent].
#[derive(Debug, Clone)]
pub enum ServiceEventInternal {
Discovered {
identity: RemoteIdentity,
metadata: HashMap<String, String>,
},
Expired {
identity: RemoteIdentity,
},
}
impl<TMeta: Metadata> TryFrom<ServiceEventInternal> for ServiceEvent<TMeta> {
type Error = String;
fn try_from(value: ServiceEventInternal) -> Result<Self, Self::Error> {
Ok(match value {
ServiceEventInternal::Discovered { identity, metadata } => Self::Discovered {
identity,
metadata: TMeta::from_hashmap(&metadata)?,
},
ServiceEventInternal::Expired { identity } => Self::Expired { identity },
})
}
}
pin_project! {
pub struct ServiceSubscription<TMeta> {
name: String,
rx: BroadcastStream<(String, ServiceEventInternal)>,
phantom: PhantomData<TMeta>,
}
}
impl<TMeta: Metadata> Stream for ServiceSubscription<TMeta> {
type Item = Result<ServiceEvent<TMeta>, BroadcastStreamRecvError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
loop {
return match self.rx.poll_next_unpin(cx) {
Poll::Ready(Some(Ok((name, event)))) => {
if name != self.name {
continue;
}
match event.try_into() {
Ok(result) => Poll::Ready(Some(Ok(result))),
Err(err) => {
warn!("error decoding into TMeta for service '{name}': {err}");
continue; // TODO: This could *technically* cause stravation. Should this error be thrown outta the stream instead?
}
}
}
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
}
}
}

View file

@ -1,39 +0,0 @@
use std::{net::SocketAddr, sync::Arc};
use crate::{spacetime::UnicastStream, spacetunnel::RemoteIdentity, ConnectedPeer, Manager};
/// represents an event coming from the network manager.
/// This is useful for updating your UI when stuff changes on the backend.
/// You can also interact with some events to cause an event.
#[derive(Debug)]
pub enum Event {
/// add a network interface on this node to listen for
AddListenAddr(SocketAddr),
/// remove a network interface from this node so that we don't listen to it
RemoveListenAddr(SocketAddr),
/// communication was established with a peer.
/// Theere could actually be multiple connections under the hood but we smooth it over in this API.
PeerConnected(ConnectedPeer),
/// communication was lost with a peer.
PeerDisconnected(RemoteIdentity),
/// the peer has opened a new unicast substream
PeerMessage(PeerMessageEvent),
/// the node is shutting down
Shutdown,
}
#[derive(Debug)]
pub struct PeerMessageEvent {
pub stream_id: u64,
pub identity: RemoteIdentity,
pub manager: Arc<Manager>,
pub stream: UnicastStream,
// Prevent manual creation by end-user
pub(crate) _priv: (),
}
impl From<PeerMessageEvent> for Event {
fn from(event: PeerMessageEvent) -> Self {
Self::PeerMessage(event)
}
}

View file

@ -1,26 +0,0 @@
//! Rust Peer to Peer Networking Library
#![warn(clippy::all, clippy::unwrap_used, clippy::panic)]
mod discovery;
mod event;
mod manager;
mod manager_stream;
mod peer;
pub mod proto;
pub mod spaceblock;
pub mod spacetime;
pub mod spacetunnel;
mod utils;
pub use discovery::*;
pub use event::*;
pub use manager::*;
pub use manager_stream::*;
pub use peer::*;
pub use utils::*;
// TODO: Remove this
#[doc(hidden)]
pub mod internal {
pub use libp2p::PeerId;
}

View file

@ -1,359 +0,0 @@
use std::{
collections::{HashMap, HashSet},
convert::Infallible,
fmt,
net::SocketAddr,
sync::{
atomic::{AtomicBool, AtomicU64},
Arc, PoisonError, RwLock,
},
};
use libp2p::{
core::{muxing::StreamMuxerBox, transport::ListenerId, ConnectedPoint},
PeerId, SwarmBuilder, Transport,
};
use serde::{Deserialize, Serialize};
use specta::Type;
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
use tracing::{error, warn};
use crate::{
spacetime::{SpaceTime, UnicastStream, UnicastStreamError},
spacetunnel::{Identity, RemoteIdentity},
DiscoveryManager, DiscoveryManagerState, Keypair, ManagerStream, ManagerStreamAction,
ManagerStreamAction2,
};
// State of the manager that may infrequently change
// These are broken out so updates to them can be done in sync (With single RwLock lock)
#[derive(Debug)]
pub struct DynamicManagerState {
pub(crate) config: ManagerConfig,
pub(crate) ipv4_listener_id: Option<Result<ListenerId, String>>,
pub(crate) ipv4_port: Option<u16>,
pub(crate) ipv6_listener_id: Option<Result<ListenerId, String>>,
pub(crate) ipv6_port: Option<u16>,
// A map of connected clients.
// This includes both inbound and outbound connections!
pub(crate) connected: HashMap<libp2p::PeerId, RemoteIdentity>,
// TODO: Removing this would be nice. It's a hack to things working after removing the `PeerId` from public API.
pub(crate) connections: HashMap<libp2p::PeerId, (ConnectedPoint, usize)>,
}
/// Is the core component of the P2P system that holds the state and delegates actions to the other components
pub struct Manager {
pub(crate) peer_id: PeerId,
pub(crate) identity: Identity,
pub(crate) application_name: String,
pub(crate) stream_id: AtomicU64,
pub(crate) state: RwLock<DynamicManagerState>,
pub(crate) discovery_state: Arc<RwLock<DiscoveryManagerState>>,
event_stream_tx: mpsc::Sender<ManagerStreamAction>,
event_stream_tx2: mpsc::Sender<ManagerStreamAction2>,
}
impl fmt::Debug for Manager {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Debug").finish()
}
}
impl Manager {
    /// create a new P2P manager. Please do your best to make the callback closures as fast as possible because they will slow the P2P event loop!
    pub async fn new(
        application_name: &'static str,
        keypair: &Keypair,
        config: ManagerConfig,
    ) -> Result<(Arc<Self>, ManagerStream), ManagerError> {
        // Reject names that can't be embedded in the protocol string below.
        application_name
            .chars()
            .all(|c| char::is_alphanumeric(c) || c == '-')
            .then_some(())
            .ok_or(ManagerError::InvalidAppName)?;

        let peer_id = keypair.peer_id();
        let (event_stream_tx, event_stream_rx) = mpsc::channel(128);
        let (event_stream_tx2, event_stream_rx2) = mpsc::channel(128);

        let config2 = config.clone();
        let (discovery_state, service_shutdown_rx) = DiscoveryManagerState::new();
        let this = Arc::new(Self {
            application_name: format!("/{application_name}/spacetime/1.0.0"),
            identity: keypair.to_identity(),
            stream_id: AtomicU64::new(0),
            state: RwLock::new(DynamicManagerState {
                config,
                ipv4_listener_id: None,
                ipv4_port: None,
                ipv6_listener_id: None,
                ipv6_port: None,
                connected: Default::default(),
                connections: Default::default(),
            }),
            discovery_state,
            peer_id,
            event_stream_tx,
            event_stream_tx2,
        });

        // Build the swarm over a QUIC transport. The builder steps here have
        // `Infallible` error types, hence the `ok(..)` unwraps.
        let mut swarm = ok(ok(SwarmBuilder::with_existing_identity(keypair.inner())
            .with_tokio()
            .with_other_transport(|keypair| {
                libp2p_quic::GenTransport::<libp2p_quic::tokio::Provider>::new(
                    libp2p_quic::Config::new(keypair),
                )
                .map(|(p, c), _| (p, StreamMuxerBox::new(c)))
                .boxed()
            }))
            .with_behaviour(|_| SpaceTime::new(this.clone())))
            .build();

        // Register IPv4/IPv6 listeners according to the supplied config.
        ManagerStream::refresh_listeners(
            &mut swarm,
            &mut this.state.write().unwrap_or_else(PoisonError::into_inner),
        );

        Ok((
            this.clone(),
            ManagerStream {
                discovery_manager: DiscoveryManager::new(
                    application_name,
                    this.identity.to_remote_identity(),
                    this.peer_id,
                    &config2,
                    this.discovery_state.clone(),
                    service_shutdown_rx,
                )?,
                manager: this,
                event_stream_rx,
                event_stream_rx2,
                swarm,
                queued_events: Default::default(),
                shutdown: AtomicBool::new(false),
                on_establish_streams: HashMap::new(),
            },
        ))
    }

    /// Send an action into the event loop, logging (not panicking) if it has shut down.
    pub(crate) async fn emit(&self, event: ManagerStreamAction) {
        match self.event_stream_tx.send(event).await {
            Ok(()) => {}
            Err(err) => warn!("error emitting event: {}", err),
        }
    }

    /// This node's public identity.
    pub fn identity(&self) -> RemoteIdentity {
        self.identity.to_remote_identity()
    }

    /// This node's libp2p `PeerId`.
    pub fn libp2p_peer_id(&self) -> PeerId {
        self.peer_id
    }

    /// Queue a config update; applied asynchronously by the event loop.
    pub async fn update_config(&self, config: ManagerConfig) {
        self.emit(ManagerStreamAction::UpdateConfig(config)).await;
    }

    /// Ask the event loop for the identities of all currently connected peers.
    /// Errs only if the event loop has shut down (the oneshot sender was dropped).
    pub async fn get_connected_peers(&self) -> Result<Vec<RemoteIdentity>, ()> {
        let (tx, rx) = oneshot::channel();
        self.emit(ManagerStreamAction::GetConnectedPeers(tx)).await;
        rx.await.map_err(|_| {
            warn!("failed to get connected peers 3 times, returning error");
        })
    }

    // TODO: Maybe remove this?
    /// Open a unicast stream to a peer addressed by its `RemoteIdentity`,
    /// resolving the libp2p `PeerId` from the discovery state.
    pub async fn stream(
        &self,
        identity: RemoteIdentity,
    ) -> Result<UnicastStream, UnicastStreamError> {
        let peer_id = {
            let state = self
                .discovery_state
                .read()
                .unwrap_or_else(PoisonError::into_inner);

            // TODO: This should not depend on a `Service` existing. Either we should store discovered peers separately for this or we should remove this method (preferred).
            state
                .discovered
                .iter()
                .find_map(|(_, i)| i.iter().find(|(i, _)| **i == identity))
                .ok_or(UnicastStreamError::PeerIdNotFound)?
                .1
                .peer_id
        };

        self.stream_inner(peer_id).await
    }

    // TODO: Should this be private now that connections can be done through the `Service`.
    // TODO: Does this need any timeouts to be added cause hanging forever is bad?
    // be aware this method is `!Sync` so can't be used from rspc. // TODO: Can this limitation be removed?
    #[allow(clippy::unused_unit)] // TODO: Remove this clippy override once error handling is added
    pub(crate) async fn stream_inner(
        &self,
        peer_id: PeerId,
    ) -> Result<UnicastStream, UnicastStreamError> {
        // TODO: With this system you can send to any random peer id. Can I reduce that by requiring `.connect(peer_id).unwrap().send(data)` or something like that.
        let (tx, rx) = oneshot::channel();
        if let Err(err) = self
            .event_stream_tx2
            .send(ManagerStreamAction2::StartStream(peer_id, tx))
            .await
        {
            warn!("error emitting event: {err}");
        };

        // The event loop replies with a builder once the substream is negotiated.
        let stream = rx.await.map_err(|err| {
            warn!("failed to queue establishing stream to peer '{peer_id}'!");
            UnicastStreamError::ErrManagerShutdown(err)
        })?;

        stream.build(self, peer_id).await
    }

    // TODO: Cleanup return type and this API in general
    /// Snapshot the entire internal state for the P2P debug route.
    #[allow(clippy::type_complexity)]
    pub fn get_debug_state(
        &self,
    ) -> (
        PeerId,
        RemoteIdentity,
        ManagerConfig,
        HashMap<PeerId, RemoteIdentity>,
        HashSet<PeerId>,
        HashMap<String, Option<HashMap<String, String>>>,
        HashMap<
            String,
            HashMap<RemoteIdentity, (PeerId, HashMap<String, String>, Vec<SocketAddr>)>,
        >,
        HashMap<String, HashSet<RemoteIdentity>>,
    ) {
        let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
        let discovery_state = self
            .discovery_state
            .read()
            .unwrap_or_else(PoisonError::into_inner);

        (
            self.peer_id,
            self.identity.to_remote_identity(),
            state.config.clone(),
            state.connected.clone(),
            state.connections.keys().copied().collect(),
            discovery_state
                .services
                .iter()
                .map(|(k, v)| (k.clone(), v.1.clone()))
                .collect(),
            discovery_state
                .discovered
                .iter()
                .map(|(k, v)| {
                    (
                        k.clone(),
                        v.clone()
                            .iter()
                            .map(|(k, v)| (*k, (v.peer_id, v.meta.clone(), v.addresses.clone())))
                            .collect::<HashMap<_, _>>(),
                    )
                })
                .collect(),
            discovery_state.known.clone(),
        )
    }

    /// Report per-family listener status derived from the listener-id/port state.
    pub fn status(&self) -> P2PStatus {
        let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
        P2PStatus {
            ipv4: match state.ipv4_listener_id.clone() {
                Some(Ok(_)) => match state.ipv4_port {
                    Some(port) => ListenerStatus::Listening { port },
                    None => ListenerStatus::Enabling,
                },
                Some(Err(error)) => ListenerStatus::Error { error },
                None => ListenerStatus::Disabled,
            },
            ipv6: match state.ipv6_listener_id.clone() {
                Some(Ok(_)) => match state.ipv6_port {
                    Some(port) => ListenerStatus::Listening { port },
                    None => ListenerStatus::Enabling,
                },
                Some(Err(error)) => ListenerStatus::Error { error },
                None => ListenerStatus::Disabled,
            },
        }
    }

    /// Request shutdown and wait for the event loop to acknowledge it.
    pub async fn shutdown(&self) {
        let (tx, rx) = oneshot::channel();
        if self
            .event_stream_tx
            .send(ManagerStreamAction::Shutdown(tx))
            .await
            .is_ok()
        {
            rx.await.unwrap_or_else(|_| {
                warn!("Error receiving shutdown signal to P2P Manager!");
            }); // Await shutdown so we don't kill the app before the Mdns broadcast
        } else {
            warn!("p2p was already shutdown, skipping...");
        }
    }
}
/// Errors that can occur constructing the P2P [`Manager`].
#[derive(Error, Debug)]
pub enum ManagerError {
    /// The `application_name` passed to `Manager::new` contained characters
    /// other than alphanumerics and `-`.
    // Message typo fixed: "you application" -> "your application".
    #[error(
        "the application name your application provided is invalid. Ensure it is alphanumeric!"
    )]
    InvalidAppName,
    /// Failure from the mDNS discovery subsystem.
    #[error("error with mdns discovery: {0}")]
    Mdns(#[from] mdns_sd::Error),
    // #[error("todo")]
    // Manager(#[from] ManagerError),
}
/// The configuration for the P2P Manager
/// DO NOT MAKE BREAKING CHANGES - This is embedded in the `node_config.json`
/// For future me: `Keypair` is not on here cause hot reloading it is hard.
#[derive(Debug, Clone, Serialize, Deserialize, Type)]
pub struct ManagerConfig {
    // Enable or disable the P2P layer
    pub enabled: bool,
    // `None` will choose a random free port on startup
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
}

impl Default for ManagerConfig {
    // Default: P2P enabled, OS-assigned random port.
    fn default() -> Self {
        Self {
            enabled: true,
            port: None,
        }
    }
}
/// Point-in-time view of both listeners, produced by [`Manager::status`].
#[derive(Serialize, Debug, Type)]
pub struct P2PStatus {
    ipv4: ListenerStatus,
    ipv6: ListenerStatus,
}

/// Lifecycle state of a single (IPv4 or IPv6) libp2p listener.
#[derive(Serialize, Debug, Type)]
#[serde(tag = "status")]
pub enum ListenerStatus {
    /// P2P is disabled or this listener was never registered.
    Disabled,
    /// Listener registered but the bound port is not yet known.
    Enabling,
    /// Actively listening on `port`.
    Listening { port: u16 },
    /// Registration failed; `error` is the stringified cause.
    Error { error: String },
}
/// Unwrap a `Result` whose error type is uninhabited.
///
/// `Infallible` has no values, so the `Err` arm can never be taken —
/// the empty match proves that to the compiler.
fn ok<T>(v: Result<T, Infallible>) -> T {
    match v {
        Ok(value) => value,
        Err(never) => match never {},
    }
}

View file

@ -1,469 +0,0 @@
use std::{
collections::{HashMap, HashSet, VecDeque},
fmt,
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
sync::{
atomic::{AtomicBool, Ordering},
Arc, PoisonError,
},
};
use libp2p::{
futures::StreamExt,
swarm::{
dial_opts::{DialOpts, PeerCondition},
NotifyHandler, SwarmEvent, ToSwarm,
},
PeerId, Swarm,
};
use tokio::sync::{mpsc, oneshot};
use tracing::{debug, error, info, trace, warn};
use crate::{
quic_multiaddr_to_socketaddr, socketaddr_to_quic_multiaddr,
spacetime::{OutboundRequest, SpaceTime, UnicastStreamBuilder},
spacetunnel::RemoteIdentity,
DiscoveryManager, DynamicManagerState, Event, Manager, ManagerConfig, Mdns,
};
/// Actions that can be queued into the `ManagerStream` event loop.
///
/// This is `Sync` so it can be used from within rspc.
pub enum ManagerStreamAction {
    /// Ask the event loop for the identities of all currently connected peers.
    GetConnectedPeers(oneshot::Sender<Vec<RemoteIdentity>>),
    /// Tell the [`libp2p::Swarm`](libp2p::Swarm) to establish a new connection to a peer.
    Dial {
        peer_id: PeerId,
        addresses: Vec<SocketAddr>,
    },
    /// Update the config. This requires the `libp2p::Swarm`
    UpdateConfig(ManagerConfig),
    /// the node is shutting down. The `ManagerStream` should convert this into `Event::Shutdown`
    Shutdown(oneshot::Sender<()>),
}

/// TODO: Get rid of this and merge into `ManagerStreamAction` without breaking rspc procedures
///
/// This is `!Sync` so can't be used from within rspc.
pub enum ManagerStreamAction2 {
    /// Events are returned to the application via the `ManagerStream::next` method.
    Event(Event),
    /// Events are returned to the application via the `ManagerStream::next` method.
    Events(Vec<Event>),
    /// Request a unicast stream to the peer; the builder is sent back once negotiated.
    StartStream(PeerId, oneshot::Sender<UnicastStreamBuilder>),
}

// Manual `Debug` impls emit just the type name (payloads are not printed).
impl fmt::Debug for ManagerStreamAction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("ManagerStreamAction")
    }
}

impl fmt::Debug for ManagerStreamAction2 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("ManagerStreamAction2")
    }
}

impl From<Event> for ManagerStreamAction2 {
    fn from(event: Event) -> Self {
        Self::Event(event)
    }
}
/// Owns the libp2p swarm and drives the whole P2P event loop.
/// The application must keep polling [`ManagerStream::next`].
#[must_use = "streams do nothing unless polled"]
pub struct ManagerStream {
    pub(crate) manager: Arc<Manager>,
    /// Actions queued from the `Sync` (rspc-safe) API surface.
    pub(crate) event_stream_rx: mpsc::Receiver<ManagerStreamAction>,
    /// Actions queued from the `!Sync` API surface (e.g. stream establishment).
    pub(crate) event_stream_rx2: mpsc::Receiver<ManagerStreamAction2>,
    pub(crate) swarm: Swarm<SpaceTime>,
    pub(crate) discovery_manager: DiscoveryManager,
    /// Events produced in a batch, drained one per `next()` call.
    pub(crate) queued_events: VecDeque<Event>,
    /// Set once `Event::Shutdown` has been emitted; polling after that is a caller bug.
    pub(crate) shutdown: AtomicBool,
    /// Outbound requests parked until a connection to the keyed peer is established.
    pub(crate) on_establish_streams: HashMap<libp2p::PeerId, Vec<OutboundRequest>>,
}
impl ManagerStream {
/// Setup the libp2p listeners based on the manager config.
/// This method will take care of removing old listeners if needed
pub(crate) fn refresh_listeners(swarm: &mut Swarm<SpaceTime>, state: &mut DynamicManagerState) {
if state.config.enabled {
let port = state.config.port.unwrap_or(0);
if state.ipv4_listener_id.is_none() || matches!(state.ipv6_listener_id, Some(Err(_))) {
state.ipv4_listener_id = Some(
swarm
.listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from((
Ipv4Addr::UNSPECIFIED,
port,
))))
.map(|id| {
debug!("registered ipv4 listener: {id:?}");
id
})
.map_err(|err| {
error!("failed to register ipv4 listener on port {port}: {err}");
err.to_string()
}),
);
}
if state.ipv4_listener_id.is_none() || matches!(state.ipv6_listener_id, Some(Err(_))) {
state.ipv6_listener_id = Some(
swarm
.listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from((
Ipv6Addr::UNSPECIFIED,
port,
))))
.map(|id| {
debug!("registered ipv6 listener: {id:?}");
id
})
.map_err(|err| {
error!("failed to register ipv6 listener on port {port}: {err}");
err.to_string()
}),
);
}
} else {
if let Some(Ok(listener)) = state.ipv4_listener_id.take() {
debug!("removing ipv4 listener with id '{:?}'", listener);
swarm.remove_listener(listener);
}
if let Some(Ok(listener)) = state.ipv6_listener_id.take() {
debug!("removing ipv6 listener with id '{:?}'", listener);
swarm.remove_listener(listener);
}
}
}
}
/// Internal sum of the two action channels so `handle_manager_stream_action`
/// has a single entry point.
enum EitherManagerStreamAction {
    A(ManagerStreamAction),
    B(ManagerStreamAction2),
}

impl From<ManagerStreamAction> for EitherManagerStreamAction {
    fn from(event: ManagerStreamAction) -> Self {
        Self::A(event)
    }
}

impl From<ManagerStreamAction2> for EitherManagerStreamAction {
    fn from(event: ManagerStreamAction2) -> Self {
        Self::B(event)
    }
}
impl ManagerStream {
pub fn listen_addrs(&self) -> HashSet<SocketAddr> {
self.discovery_manager.listen_addrs.clone()
}
// Your application should keep polling this until `None` is received or the P2P system will be halted.
pub async fn next(&mut self) -> Option<Event> {
// We loop polling internal services until an event comes in that needs to be sent to the parent application.
loop {
assert!(!self.shutdown.load(Ordering::Relaxed), "`ManagerStream::next` called after shutdown event. This is a mistake in your application code!");
if let Some(event) = self.queued_events.pop_front() {
return Some(event);
}
tokio::select! {
() = self.discovery_manager.poll() => {
continue;
},
event = self.event_stream_rx.recv() => {
// If the sender has shut down we return `None` to also shut down too.
if let Some(event) = self.handle_manager_stream_action(event?.into()).await {
return Some(event);
}
}
event = self.event_stream_rx2.recv() => {
// If the sender has shut down we return `None` to also shut down too.
if let Some(event) = self.handle_manager_stream_action(event?.into()).await {
return Some(event);
}
}
event = self.swarm.select_next_some() => {
match event {
SwarmEvent::Behaviour(event) => {
if let Some(event) = self.handle_manager_stream_action(event.into()).await {
if let Event::Shutdown { .. } = event {
self.shutdown.store(true, Ordering::Relaxed);
}
return Some(event);
}
},
SwarmEvent::ConnectionEstablished { peer_id, .. } => {
if let Some(streams) = self.on_establish_streams.remove(&peer_id) {
for event in streams {
self.swarm
.behaviour_mut()
.pending_events
.push_back(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event
});
}
}
},
SwarmEvent::ConnectionClosed { peer_id, num_established, .. } => {
if num_established == 0 {
let mut state = self.manager.state.write()
.unwrap_or_else(PoisonError::into_inner);
if state
.connected
.remove(&peer_id).is_none() || state.connections.remove(&peer_id).is_none() {
warn!("unable to remove unconnected client from connected map. This indicates a bug!");
}
}
},
SwarmEvent::IncomingConnection { local_addr, .. } => debug!("incoming connection from '{}'", local_addr),
SwarmEvent::IncomingConnectionError { local_addr, error, .. } => warn!("handshake error with incoming connection from '{}': {}", local_addr, error),
SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => warn!("error establishing connection with '{:?}': {}", peer_id, error),
SwarmEvent::NewListenAddr { listener_id, address, .. } => {
let addr = match quic_multiaddr_to_socketaddr(address.clone()) {
Ok(addr) => addr,
Err(err) => {
warn!("error passing listen address '{address:?}': {err:?}");
continue;
}
};
{
let mut state = self.manager.state.write().unwrap_or_else(PoisonError::into_inner);
if let Some(Ok(lid)) = &state.ipv4_listener_id {
if *lid == listener_id {
state.ipv4_port = Some(addr.port());
}
}
if let Some(Ok(lid)) = &state.ipv6_listener_id {
if *lid == listener_id {
state.ipv6_port = Some(addr.port());
}
}
}
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address added: {}", addr);
self.discovery_manager.listen_addrs.insert(addr);
self.discovery_manager.do_advertisement();
return Some(Event::AddListenAddr(addr));
},
Err(err) => {
warn!("error passing listen address: {}", err);
continue;
}
}
},
SwarmEvent::ExpiredListenAddr { address, .. } => {
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address expired: {}", addr);
self.discovery_manager.listen_addrs.remove(&addr);
self.discovery_manager.do_advertisement();
return Some(Event::RemoveListenAddr(addr));
},
Err(err) => {
warn!("error passing listen address: {}", err);
continue;
}
}
}
SwarmEvent::ListenerClosed { listener_id, addresses, reason } => {
trace!("listener '{:?}' was closed due to: {:?}", listener_id, reason);
for address in addresses {
match quic_multiaddr_to_socketaddr(address) {
Ok(addr) => {
trace!("listen address closed: {}", addr);
self.discovery_manager.listen_addrs.remove(&addr);
self.queued_events.push_back(Event::RemoveListenAddr(addr));
},
Err(err) => {
warn!("error passing listen address: {}", err);
continue;
}
}
}
// The `loop` will restart and begin returning the events from `queued_events`.
}
SwarmEvent::ListenerError { listener_id, error } => warn!("listener '{:?}' reported a non-fatal error: {}", listener_id, error),
SwarmEvent::Dialing { .. } => {},
_ => {}
}
}
}
}
}
async fn handle_manager_stream_action(
&mut self,
event: EitherManagerStreamAction,
) -> Option<Event> {
match event {
EitherManagerStreamAction::A(event) => match event {
ManagerStreamAction::GetConnectedPeers(response) => {
let result = {
let state = self
.manager
.state
.read()
.unwrap_or_else(PoisonError::into_inner);
self.swarm
.connected_peers()
.filter_map(|v| {
let v = state.connected.get(v);
if v.is_none() {
warn!("Error converting PeerId({v:?}) into RemoteIdentity. This is likely a bug in P2P.");
}
v.copied()
})
.collect::<Vec<_>>()
};
response
.send(result)
.map_err(|_| {
error!("Error sending response to `GetConnectedPeers` request! Sending was dropped!");
})
.ok();
}
ManagerStreamAction::Dial { peer_id, addresses } => {
match self.swarm.dial(
DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected)
.addresses(addresses.iter().map(socketaddr_to_quic_multiaddr).collect())
.build(),
) {
Ok(()) => {}
Err(err) => warn!(
"error dialing peer '{}' with addresses '{:?}': {}",
peer_id, addresses, err
),
}
}
ManagerStreamAction::UpdateConfig(config) => {
let mut state = self
.manager
.state
.write()
.unwrap_or_else(PoisonError::into_inner);
state.config = config;
Self::refresh_listeners(&mut self.swarm, &mut state);
if !state.config.enabled {
if let Some(mdns) = self.discovery_manager.mdns.take() {
drop(state);
mdns.shutdown();
}
} else if self.discovery_manager.mdns.is_none() {
match Mdns::new(
self.discovery_manager.application_name,
self.discovery_manager.identity,
self.discovery_manager.peer_id,
) {
Ok(mdns) => {
self.discovery_manager.mdns = Some(mdns);
self.discovery_manager.do_advertisement();
}
Err(err) => {
error!("error starting mDNS service: {err:?}");
self.discovery_manager.mdns = None;
// state.config.enabled = false;
// TODO: Properly reset the UI state cause it will be outa sync
}
}
}
// drop(state);
}
ManagerStreamAction::Shutdown(tx) => {
info!("Shutting down P2P Manager...");
self.discovery_manager.shutdown();
tx.send(()).unwrap_or_else(|()| {
warn!("Error sending shutdown signal to P2P Manager!");
});
return Some(Event::Shutdown);
}
},
EitherManagerStreamAction::B(event) => match event {
ManagerStreamAction2::Event(event) => return Some(event),
ManagerStreamAction2::Events(mut events) => {
let first = events.pop();
for event in events {
self.queued_events.push_back(event);
}
return first;
}
ManagerStreamAction2::StartStream(peer_id, tx) => {
if !self.swarm.connected_peers().any(|v| *v == peer_id) {
let Some(addresses) = self
.discovery_manager
.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.discovered
.iter()
.find_map(|(_, service)| {
service.iter().find_map(|(_, v)| {
(v.peer_id == peer_id).then(|| v.addresses.clone())
})
})
else {
warn!("Peer '{}' is not connected and no addresses are known for it! Skipping connection creation...", peer_id);
return None;
};
match self.swarm.dial(
DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected)
.addresses(
addresses.iter().map(socketaddr_to_quic_multiaddr).collect(),
)
.build(),
) {
Ok(()) => {}
Err(err) => warn!(
"error dialing peer '{}' with addresses '{:?}': {}",
peer_id, addresses, err
),
}
self.on_establish_streams
.entry(peer_id)
.or_default()
.push(OutboundRequest::Unicast(tx));
} else {
self.swarm.behaviour_mut().pending_events.push_back(
ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: OutboundRequest::Unicast(tx),
},
);
}
}
},
}
None
}
}

View file

@ -1,48 +0,0 @@
use std::{
fmt::{self, Formatter},
net::SocketAddr,
};
use libp2p::PeerId;
use crate::{spacetunnel::RemoteIdentity, Metadata};
/// Represents a discovered peer.
/// This is held by [Manager] to keep track of discovered peers
#[derive(Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(feature = "specta", derive(specta::Type))]
pub struct DiscoveredPeer<TMeta: Metadata> {
    /// the public key of the discovered peer
    pub identity: RemoteIdentity,
    /// the libp2p peer id of the discovered peer
    // Gated so the helper attribute only applies when the `Serialize` derive is
    // active; a bare `#[serde(skip)]` fails to compile with the feature disabled.
    #[cfg_attr(feature = "serde", serde(skip))]
    pub peer_id: PeerId,
    /// the metadata of the discovered peer
    pub metadata: TMeta,
    /// the addresses of the discovered peer
    pub addresses: Vec<SocketAddr>,
}
// `Manager` impls `Debug` but it causes infinite loop and stack overflow, lmao.
impl<TMeta: Metadata> fmt::Debug for DiscoveredPeer<TMeta> {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // `identity` is not included in the output.
        f.debug_struct("DiscoveredPeer")
            .field("peer_id", &self.peer_id)
            .field("metadata", &self.metadata)
            .field("addresses", &self.addresses)
            .finish()
    }
}

/// Represents a connected peer.
/// This is held by [Manager] to keep track of connected peers
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(feature = "specta", derive(specta::Type))]
pub struct ConnectedPeer {
    /// the identity of the connected peer
    pub identity: RemoteIdentity,
    /// Did I open the connection?
    pub establisher: bool,
}

View file

@ -1,185 +0,0 @@
use std::{
collections::VecDeque,
sync::{Arc, PoisonError},
task::{Context, Poll},
};
use libp2p::{
core::{ConnectedPoint, Endpoint},
swarm::{
derive_prelude::{ConnectionEstablished, ConnectionId, FromSwarm},
ConnectionClosed, ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent,
THandlerOutEvent, ToSwarm,
},
Multiaddr,
};
use thiserror::Error;
use tracing::{debug, trace, warn};
use crate::{Event, Manager, ManagerStreamAction2};
use super::SpaceTimeConnection;
/// Internal threshold for when to shrink the capacity
/// of empty queues. If the capacity of an empty queue
/// exceeds this threshold, the associated memory is
/// released.
pub const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100;

// TODO: Remove this?
/// Placeholder error type for failed outbound requests; currently uninhabited.
#[derive(Debug, Error)]
pub enum OutboundFailure {}

/// `SpaceTime` is a [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) that implements the `SpaceTime` protocol.
/// This protocol sits under the application to abstract many complexities of 2 way connections and deals with authentication, chunking, etc.
pub struct SpaceTime {
    pub(crate) manager: Arc<Manager>,
    /// Events waiting to be handed to the swarm on the next `poll`.
    pub(crate) pending_events:
        VecDeque<ToSwarm<<Self as NetworkBehaviour>::ToSwarm, THandlerInEvent<Self>>>,
}
impl SpaceTime {
    /// initialise the fabric of space time
    pub fn new(manager: Arc<Manager>) -> Self {
        Self {
            manager,
            pending_events: VecDeque::new(),
        }
    }
}
impl NetworkBehaviour for SpaceTime {
    type ConnectionHandler = SpaceTimeConnection;
    type ToSwarm = ManagerStreamAction2;

    fn handle_established_inbound_connection(
        &mut self,
        _connection_id: ConnectionId,
        peer_id: libp2p::PeerId,
        _local_addr: &Multiaddr,
        _remote_addr: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        // One handler per connection, sharing the manager.
        Ok(SpaceTimeConnection::new(peer_id, self.manager.clone()))
    }

    fn handle_pending_outbound_connection(
        &mut self,
        _connection_id: ConnectionId,
        _maybe_peer: Option<libp2p::PeerId>,
        _addresses: &[Multiaddr],
        _effective_role: Endpoint,
    ) -> Result<Vec<Multiaddr>, ConnectionDenied> {
        // This should be unused but libp2p still calls it
        Ok(vec![])
    }

    fn handle_established_outbound_connection(
        &mut self,
        _connection_id: ConnectionId,
        peer_id: libp2p::PeerId,
        _addr: &Multiaddr,
        _role_override: Endpoint,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        Ok(SpaceTimeConnection::new(peer_id, self.manager.clone()))
    }

    fn on_swarm_event(&mut self, event: FromSwarm) {
        match event {
            FromSwarm::ConnectionEstablished(ConnectionEstablished {
                peer_id,
                endpoint,
                other_established,
                ..
            }) => {
                let address = match endpoint {
                    ConnectedPoint::Dialer { address, .. } => Some(address.clone()),
                    ConnectedPoint::Listener { .. } => None,
                };
                trace!(
                    "connection establishing with peer '{}' found at '{:?}'; peer has {} active connections",
                    peer_id, address, other_established
                );
                // Track the endpoint + prior connection count so later code can tell
                // who established the connection (see `proto_inbound`).
                self.manager
                    .state
                    .write()
                    .unwrap_or_else(PoisonError::into_inner)
                    .connections
                    .insert(peer_id, (endpoint.clone(), other_established));
            }
            FromSwarm::ConnectionClosed(ConnectionClosed {
                peer_id,
                remaining_established,
                ..
            }) => {
                // Only treat the peer as disconnected once its last connection closes.
                if remaining_established == 0 {
                    debug!("Disconnected from peer '{}'", peer_id);
                    let mut state = self
                        .manager
                        .state
                        .write()
                        .unwrap_or_else(PoisonError::into_inner);
                    state.connections.remove(&peer_id);
                    if let Some(remote_identity) = state.connected.remove(&peer_id) {
                        self.pending_events.push_back(ToSwarm::GenerateEvent(
                            Event::PeerDisconnected(remote_identity).into(),
                        ));
                    } else {
                        warn!("Disconnected peer '{peer_id}' but was not connected. This likely indicates a bug!");
                    }
                }
            }
            FromSwarm::AddressChange(event) => {
                debug!(
                    "Address change event: {:?} {:?} {:?} {:?}",
                    event.peer_id, event.connection_id, event.old, event.new
                );
            }
            FromSwarm::DialFailure(event) => {
                if let Some(peer_id) = event.peer_id {
                    debug!("Dialing failure to peer '{}': {:?}", peer_id, event.error);

                    // TODO
                    // If there are pending outgoing requests when a dial failure occurs,
                    // it is implied that we are not connected to the peer, since pending
                    // outgoing requests are drained when a connection is established and
                    // only created when a peer is not connected when a request is made.
                    // Thus these requests must be considered failed, even if there is
                    // another, concurrent dialing attempt ongoing.
                    // if let Some(pending) = self.pending_outbound_requests.remove(&peer_id) {
                    // 	for request in pending {
                    // 		self.pending_events
                    // 			.push_back(NetworkBehaviourAction::GenerateEvent(
                    // 				Event::OutboundFailure {
                    // 					peer_id,
                    // 					request_id: request.request_id,
                    // 					error: OutboundFailure::DialFailure,
                    // 				},
                    // 			));
                    // 	}
                    // }
                }
            }
            _ => {}
        }
    }

    fn on_connection_handler_event(
        &mut self,
        _peer_id: libp2p::PeerId,
        _connection: ConnectionId,
        event: THandlerOutEvent<Self>,
    ) {
        // Handler events bubble straight up to `ManagerStream` as behaviour events.
        self.pending_events.push_back(ToSwarm::GenerateEvent(event));
    }

    fn poll(&mut self, _: &mut Context<'_>) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        if let Some(ev) = self.pending_events.pop_front() {
            return Poll::Ready(ev);
        } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD {
            // Release memory retained by a large-but-now-empty queue.
            self.pending_events.shrink_to_fit();
        }
        Poll::Pending
    }
}

View file

@ -1,140 +0,0 @@
use libp2p::{
swarm::{
handler::{
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound,
},
SubstreamProtocol,
},
PeerId,
};
use std::{
collections::VecDeque,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use tracing::error;
use crate::{Manager, ManagerStreamAction2};
use super::{InboundProtocol, OutboundProtocol, OutboundRequest, EMPTY_QUEUE_SHRINK_THRESHOLD};
// TODO: Probs change this based on the ConnectionEstablishmentPayload
const SUBSTREAM_TIMEOUT: Duration = Duration::from_secs(10); // TODO: Tune value

/// Per-connection libp2p handler: negotiates inbound/outbound substreams
/// for a single remote peer.
#[allow(clippy::type_complexity)]
pub struct SpaceTimeConnection {
    peer_id: PeerId,
    manager: Arc<Manager>,
    /// Events waiting to be returned from `ConnectionHandler::poll`.
    pending_events: VecDeque<
        ConnectionHandlerEvent<
            OutboundProtocol,
            <Self as ConnectionHandler>::OutboundOpenInfo,
            <Self as ConnectionHandler>::ToBehaviour,
            // StreamUpgradeError<io::Error>,
        >,
    >,
}
impl SpaceTimeConnection {
    /// Construct the handler for a single peer's connection.
    pub(super) fn new(peer_id: PeerId, manager: Arc<Manager>) -> Self {
        Self {
            peer_id,
            manager,
            pending_events: VecDeque::new(),
        }
    }
}
impl ConnectionHandler for SpaceTimeConnection {
type FromBehaviour = OutboundRequest;
type ToBehaviour = ManagerStreamAction2;
type InboundProtocol = InboundProtocol;
type OutboundProtocol = OutboundProtocol;
type OutboundOpenInfo = ();
type InboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
SubstreamProtocol::new(
InboundProtocol {
peer_id: self.peer_id,
manager: self.manager.clone(),
},
(),
)
.with_timeout(SUBSTREAM_TIMEOUT)
}
fn on_behaviour_event(&mut self, req: Self::FromBehaviour) {
// TODO: Working keep alives
// self.keep_alive = KeepAlive::Yes;
// self.outbound.push_back(request);
self.pending_events
.push_back(ConnectionHandlerEvent::OutboundSubstreamRequest {
protocol: SubstreamProtocol::new(
OutboundProtocol {
application_name: self.manager.application_name.clone(),
req,
identity: self.manager.identity.clone(),
},
(),
) // TODO: Use `info` here maybe to pass into about the client. Idk?
.with_timeout(SUBSTREAM_TIMEOUT),
});
}
fn connection_keep_alive(&self) -> bool {
true // TODO: Make this work how the old one did with storing it on `self` and updating on events
}
fn poll(
&mut self,
_cx: &mut Context<'_>,
) -> Poll<
ConnectionHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour>,
> {
if let Some(event) = self.pending_events.pop_front() {
return Poll::Ready(event);
} else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD {
self.pending_events.shrink_to_fit();
}
Poll::Pending
}
// TODO: Which level we doing error handler?. On swarm, on Behavior or here???
fn on_connection_event(
&mut self,
event: ConnectionEvent<
Self::InboundProtocol,
Self::OutboundProtocol,
Self::InboundOpenInfo,
Self::OutboundOpenInfo,
>,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
protocol, ..
}) => {
self.pending_events
.push_back(ConnectionHandlerEvent::NotifyBehaviour(protocol));
}
ConnectionEvent::FullyNegotiatedOutbound(_) => {}
ConnectionEvent::DialUpgradeError(event) => {
error!("DialUpgradeError: {:#?}", event.error);
}
ConnectionEvent::ListenUpgradeError(event) => {
error!("DialUpgradeError: {:#?}", event.error);
// TODO: If `event.error` close connection cause we don't "speak the same language"!
}
ConnectionEvent::AddressChange(_) => {
// TODO: Should we be telling `SpaceTime` to update it's info here or is it also getting this event?
}
ConnectionEvent::LocalProtocolsChange(_) => {}
ConnectionEvent::RemoteProtocolsChange(_) => {}
_ => {}
}
}
}

View file

@ -1,10 +0,0 @@
//! This file contains glue to make libp2p work for us. The items here are fairly meaningless on their own.
/// Newtype wrapping the protocol-name string handed to libp2p during
/// substream negotiation.
#[derive(Clone)]
pub struct SpaceTimeProtocolName(pub String);

impl AsRef<str> for SpaceTimeProtocolName {
    fn as_ref(&self) -> &str {
        self.0.as_str()
    }
}

View file

@ -1,16 +0,0 @@
//! `Spacetime` is just a fancy name for the protocol which sits between libp2p and the application built on this library.
//! This protocol sits under the application to abstract many complexities of 2-way connections and deals with authentication, chunking, etc.
mod behaviour;
mod connection;
mod libp2p;
mod proto_inbound;
mod proto_outbound;
mod stream;
pub use self::libp2p::*;
pub use behaviour::*;
pub use connection::*;
pub use proto_inbound::*;
pub use proto_outbound::*;
pub use stream::*;

View file

@ -1,110 +0,0 @@
use std::{
future::Future,
pin::Pin,
sync::{atomic::Ordering, Arc, PoisonError},
};
use libp2p::{
core::{ConnectedPoint, UpgradeInfo},
InboundUpgrade, PeerId, Stream,
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::{debug, warn};
use crate::{
spacetime::UnicastStream, ConnectedPeer, Event, Manager, ManagerStreamAction2, PeerMessageEvent,
};
use super::SpaceTimeProtocolName;
/// Upgrade applied to inbound substreams: authenticates the remote and
/// yields the resulting events as a `ManagerStreamAction2`.
pub struct InboundProtocol {
    pub(crate) peer_id: PeerId,
    pub(crate) manager: Arc<Manager>,
}

impl UpgradeInfo for InboundProtocol {
    type Info = SpaceTimeProtocolName;
    type InfoIter = [Self::Info; 1];

    // Advertise the app-specific protocol string so only matching apps negotiate.
    fn protocol_info(&self) -> Self::InfoIter {
        [SpaceTimeProtocolName(self.manager.application_name.clone())]
    }
}
impl InboundUpgrade<Stream> for InboundProtocol {
    type Output = ManagerStreamAction2;
    type Error = ();
    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send + 'static>>;

    fn upgrade_inbound(self, io: Stream, _: Self::Info) -> Self::Future {
        // Unique id for correlating this stream's log lines.
        let id = self.manager.stream_id.fetch_add(1, Ordering::Relaxed);
        Box::pin(async move {
            debug!(
                "stream({}, {id}): accepting inbound connection",
                self.peer_id
            );
            let io = io.compat();
            debug!("stream({}, {id}): unicast stream accepted", self.peer_id);

            // Authenticate the remote; failure aborts the upgrade.
            let stream = match UnicastStream::new_inbound(self.manager.identity.clone(), io).await {
                Ok(v) => v,
                Err(err) => {
                    warn!(
                        "Failed to construct 'UnicastStream' with Peer('{}'): {err:?}",
                        self.peer_id
                    );
                    return Err(());
                }
            };

            // Record the peer as connected and determine whether *we* established
            // the underlying connection. Only `Some(..)` for the connection's first
            // stream (stored prior-connection count of 0) so `PeerConnected` fires once.
            let establisher = {
                let mut state = self
                    .manager
                    .state
                    .write()
                    .unwrap_or_else(PoisonError::into_inner);
                state
                    .connected
                    .insert(self.peer_id, stream.remote_identity());

                match state.connections.get(&self.peer_id) {
                    Some((endpoint, 0)) => Some(match endpoint {
                        ConnectedPoint::Dialer { .. } => true,
                        ConnectedPoint::Listener { .. } => false,
                    }),
                    None => {
                        warn!("Error getting PeerId({})'s connection state. This indicates a bug in P2P", self.peer_id);
                        None
                    }
                    _ => None,
                }
            };

            debug!(
                "sending establishment request to peer '{}'",
                stream.remote_identity()
            );
            let identity = stream.remote_identity();
            let mut events = vec![PeerMessageEvent {
                stream_id: id,
                identity,
                manager: self.manager.clone(),
                stream,
                _priv: (),
            }
            .into()];

            if let Some(establisher) = establisher {
                events.push(Event::PeerConnected(ConnectedPeer {
                    identity,
                    establisher,
                }));
            }

            Ok(ManagerStreamAction2::Events(events))
        })
    }
}

View file

@ -1,54 +0,0 @@
use std::future::{ready, Ready};
use libp2p::{core::UpgradeInfo, OutboundUpgrade, Stream};
use tokio::sync::oneshot;
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::warn;
use crate::spacetunnel::Identity;
use super::{SpaceTimeProtocolName, UnicastStreamBuilder};
/// A request the manager can fulfil over a newly negotiated outbound substream.
#[derive(Debug)]
pub enum OutboundRequest {
    // Channel over which the negotiated stream builder is handed back to the requester.
    Unicast(oneshot::Sender<UnicastStreamBuilder>),
}

/// Outbound half of the SpaceTime protocol upgrade.
pub struct OutboundProtocol {
    // Application name used to derive the protocol identifier.
    pub(crate) application_name: String,
    // The pending request to fulfil once the substream is negotiated.
    pub(crate) req: OutboundRequest,
    // Our local identity, needed for the stream handshake later on.
    pub(crate) identity: Identity,
}
impl UpgradeInfo for OutboundProtocol {
    type Info = SpaceTimeProtocolName;
    type InfoIter = [Self::Info; 1];

    /// Advertise exactly one protocol, derived from the application name.
    fn protocol_info(&self) -> Self::InfoIter {
        let name = self.application_name.clone();
        [SpaceTimeProtocolName(name)]
    }
}
impl OutboundUpgrade<Stream> for OutboundProtocol {
    type Output = ();
    type Error = ();
    type Future = Ready<Result<(), ()>>;

    /// Hand the freshly negotiated substream back to whoever requested it.
    ///
    /// The protocol discriminator is written to the stream by `Manager::stream`
    /// before the stream reaches the user, which keeps this method synchronous.
    fn upgrade_outbound(self, io: Stream, _protocol: Self::Info) -> Self::Future {
        // Single-variant enum, so this `let` is irrefutable.
        let OutboundRequest::Unicast(sender) = self.req;
        let builder = UnicastStreamBuilder::new(self.identity.clone(), io.compat());
        let result = sender.send(builder).map_err(|err| {
            warn!("error transmitting unicast stream: {err:?}");
        });
        ready(result)
    }
}

View file

@ -1,200 +0,0 @@
use std::{
io::{self},
pin::Pin,
sync::PoisonError,
task::{Context, Poll},
};
use libp2p::{futures::AsyncWriteExt, PeerId, Stream};
use thiserror::Error;
use tokio::{
io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt as TokioAsyncWriteExt, ReadBuf},
sync::oneshot,
time::{timeout, Duration},
};
use tokio_util::compat::Compat;
use crate::{
spacetunnel::{Identity, IdentityErr, RemoteIdentity, REMOTE_IDENTITY_LEN},
Manager,
};
/// Number of bytes in the (currently unused) handshake challenge.
pub const CHALLENGE_LENGTH: usize = 32;

// Deadline applied to each read/write step of the identity handshake.
const ONE_MINUTE: Duration = Duration::from_secs(60);

/// A unicast stream is a direct stream to a specific peer.
#[derive(Debug)]
#[allow(unused)] // TODO: Remove this lint override
pub struct UnicastStream {
    // Underlying libp2p stream adapted to tokio's IO traits.
    io: Compat<Stream>,
    // Our local (private) identity.
    me: Identity,
    // The public identity the remote presented during the handshake.
    remote: RemoteIdentity,
}
// TODO: Utils for sending msgpack and stuff over the stream. -> Have a max size of reading buffers so we are less susceptible to DoS attacks.
impl UnicastStream {
    /// Accept an inbound stream: read the remote's identity first, then send
    /// ours. Each step is bounded by `ONE_MINUTE`.
    pub(crate) async fn new_inbound(
        identity: Identity,
        mut io: Compat<Stream>,
    ) -> Result<Self, UnicastStreamError> {
        // TODO: Finish this
        // let mut challenge = [0u8; CHALLENGE_LENGTH];
        // io.read_exact(&mut challenge).await.unwrap(); // TODO: Timeout

        // let nonce = ChaCha20Poly1305::generate_nonce(&mut OsRng); // 96-bits; unique per message
        // let ciphertext = cipher.encrypt(&nonce, b"plaintext message".as_ref())?;
        // let plaintext = cipher.decrypt(&nonce, ciphertext.as_ref())?;

        // TODO: THIS IS INSECURE!!!!!
        // We are just sending strings of the public key without any verification the other party holds the private key.
        let mut actual = [0; REMOTE_IDENTITY_LEN];
        match timeout(ONE_MINUTE, io.read_exact(&mut actual)).await {
            Ok(r) => r?,
            Err(_) => return Err(UnicastStreamError::Timeout),
        };
        let remote = RemoteIdentity::from_bytes(&actual)?;

        match timeout(
            ONE_MINUTE,
            io.write_all(&identity.to_remote_identity().get_bytes()),
        )
        .await
        {
            Ok(w) => w?,
            Err(_) => return Err(UnicastStreamError::Timeout),
        };

        // TODO: Do we have something to compare against? I don't think so this is fine.
        // if expected.get_bytes() != actual {
        //     panic!("Mismatch in remote identity!");
        // }

        Ok(Self {
            io,
            me: identity,
            remote,
        })
    }

    /// Open an outbound stream: send our identity first, then read theirs —
    /// the mirror image of `new_inbound`. Each step is bounded by `ONE_MINUTE`.
    pub(crate) async fn new_outbound(
        identity: Identity,
        mut io: Compat<Stream>,
    ) -> Result<Self, UnicastStreamError> {
        // TODO: Use SPAKE not some handrolled insecure mess
        // let challenge = rand::thread_rng().gen::<[u8; CHALLENGE_LENGTH]>();
        // self.0.write_all(&challenge).await?;

        // TODO: THIS IS INSECURE!!!!!
        // We are just sending strings of the public key without any verification the other party holds the private key.
        match timeout(
            ONE_MINUTE,
            io.write_all(&identity.to_remote_identity().get_bytes()),
        )
        .await
        {
            Ok(w) => w?,
            Err(_) => return Err(UnicastStreamError::Timeout),
        };

        let mut actual = [0; REMOTE_IDENTITY_LEN];
        match timeout(ONE_MINUTE, io.read_exact(&mut actual)).await {
            Ok(r) => r?,
            Err(_) => return Err(UnicastStreamError::Timeout),
        };
        let remote = RemoteIdentity::from_bytes(&actual)?;

        // TODO: Do we have something to compare against? I don't think so this is fine.
        // if expected.get_bytes() != actual {
        //     panic!("Mismatch in remote identity!");
        // }

        Ok(Self {
            io,
            me: identity,
            remote,
        })
    }

    /// The identity the remote presented during the handshake.
    #[must_use]
    pub fn remote_identity(&self) -> RemoteIdentity {
        self.remote
    }

    /// Close the underlying stream, consuming `self`.
    pub async fn close(self) -> Result<(), io::Error> {
        self.io.into_inner().close().await
    }
}
impl AsyncRead for UnicastStream {
    /// Forward reads straight through to the underlying stream.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        Pin::new(&mut this.io).poll_read(cx, buf)
    }
}
impl AsyncWrite for UnicastStream {
    /// Forward writes straight through to the underlying stream.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let this = self.get_mut();
        Pin::new(&mut this.io).poll_write(cx, buf)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        Pin::new(&mut this.io).poll_flush(cx)
    }

    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let this = self.get_mut();
        Pin::new(&mut this.io).poll_shutdown(cx)
    }
}
/// Errors that can occur while establishing or using a `UnicastStream`.
#[derive(Debug, Error)]
pub enum UnicastStreamError {
    #[error("io error: {0}")]
    IoError(#[from] io::Error),
    // The remote presented bytes that don't decode into a valid identity.
    #[error("identity error: {0}")]
    InvalidError(#[from] IdentityErr),
    // TODO: Technically this error is from the manager
    #[error("peer id not found")]
    PeerIdNotFound,
    #[error("error manager shutdown")]
    ErrManagerShutdown(#[from] oneshot::error::RecvError),
    // NOTE(review): overlaps with `PeerIdNotFound` above — consider merging them.
    #[error("error getting peer id for '{0}'")]
    ErrPeerIdNotFound(RemoteIdentity),
    // A handshake read/write exceeded its deadline (`ONE_MINUTE`).
    #[error("timeout")]
    Timeout,
}
/// Holds a negotiated outbound stream whose identity handshake has not run yet.
/// Finalised via `UnicastStreamBuilder::build`.
#[derive(Debug)]
pub struct UnicastStreamBuilder {
    // Local identity used during the handshake.
    identity: Identity,
    // The raw stream to upgrade into a `UnicastStream`.
    io: Compat<Stream>,
}
impl UnicastStreamBuilder {
    pub(crate) fn new(identity: Identity, io: Compat<Stream>) -> Self {
        Self { identity, io }
    }

    /// Run the outbound handshake and record the connection with the manager.
    pub(crate) async fn build(
        self,
        manager: &Manager,
        peer_id: PeerId,
    ) -> Result<UnicastStream, UnicastStreamError> {
        let Self { identity, io } = self;
        let stream = UnicastStream::new_outbound(identity, io).await?;

        let mut state = manager
            .state
            .write()
            .unwrap_or_else(PoisonError::into_inner);
        state
            .connected
            .insert(peer_id, stream.remote_identity());
        drop(state);

        Ok(stream)
    }
}

View file

@ -1,63 +0,0 @@
use ed25519_dalek::SigningKey;
use libp2p::identity::ed25519::{self};
use serde::{Deserialize, Serialize};
use crate::spacetunnel::{Identity, RemoteIdentity};
/// An ed25519 keypair wrapping libp2p's representation so it can also be
/// converted into this crate's own identity types.
#[derive(Debug, Clone)]
pub struct Keypair(ed25519::Keypair);

impl Keypair {
    /// Generate a fresh random keypair.
    #[must_use]
    pub fn generate() -> Self {
        Self(ed25519::Keypair::generate())
    }

    /// Convert into the crate's private-key identity type.
    #[must_use]
    pub fn to_identity(&self) -> Identity {
        // This depends on libp2p implementation details which isn't great
        SigningKey::from_keypair_bytes(&self.0.to_bytes())
            .expect("Failed to convert 'ed25519::Keypair' into 'SigningKey'. They should have an identical representation.")
            .into()
    }

    /// The shareable public-key form of this keypair.
    #[must_use]
    pub fn to_remote_identity(&self) -> RemoteIdentity {
        self.to_identity().to_remote_identity()
    }

    // TODO: Make this `pub(crate)`
    #[must_use]
    pub fn peer_id(&self) -> libp2p::PeerId {
        let pk: libp2p::identity::PublicKey = self.0.public().into();
        libp2p::PeerId::from_public_key(&pk)
    }

    /// The underlying keypair in libp2p's generic form.
    #[must_use]
    pub fn inner(&self) -> libp2p::identity::Keypair {
        self.0.clone().into()
    }
}
impl Serialize for Keypair {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_bytes(&self.0.to_bytes())
}
}
impl<'de> Deserialize<'de> for Keypair {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let mut bytes = Vec::<u8>::deserialize(deserializer)?;
Ok(Self(
ed25519::Keypair::try_from_bytes(bytes.as_mut_slice())
.map_err(serde::de::Error::custom)?,
))
}
}

View file

@ -1,10 +0,0 @@
use std::{collections::HashMap, fmt::Debug};
/// This trait must be implemented for the metadata type to allow it to be converted to mDNS DNS records.
pub trait Metadata: Debug + Clone + Send + Sync + 'static {
    /// Flatten the metadata into DNS TXT-record key/value pairs.
    fn to_hashmap(self) -> HashMap<String, String>;

    /// Rebuild the metadata from TXT-record pairs, returning a human-readable
    /// error message when required keys are missing or malformed.
    fn from_hashmap(data: &HashMap<String, String>) -> Result<Self, String>
    where
        Self: Sized;
}

View file

@ -1,7 +0,0 @@
mod keypair;
mod metadata;
mod multiaddr;
pub use keypair::*;
pub use metadata::*;
pub use multiaddr::*;

View file

@ -1,44 +0,0 @@
use std::net::{IpAddr, SocketAddr};
use libp2p::{multiaddr::Protocol, Multiaddr};
// TODO: Turn these into From/Into impls on a wrapper type

/// Convert a QUIC `Multiaddr` (`/ip4|ip6/<addr>/udp/<port>/...`) into a `SocketAddr`.
///
/// Returns a human-readable error when the first two segments are not an IP
/// protocol followed by a UDP port; trailing segments (e.g. `QuicV1`) are ignored.
pub fn quic_multiaddr_to_socketaddr(m: Multiaddr) -> Result<SocketAddr, String> {
    let mut addr_parts = m.iter();

    let addr = match addr_parts.next() {
        Some(Protocol::Ip4(addr)) => IpAddr::V4(addr),
        Some(Protocol::Ip6(addr)) => IpAddr::V6(addr),
        Some(proto) => {
            // Fixed wording: previously read "found protocol 'Ip4' or 'Ip6'"
            // where "expected" was meant, inconsistent with the segment 2 message.
            return Err(format!(
                "Invalid multiaddr. Segment 1 expected protocol 'Ip4' or 'Ip6' but found '{proto}'"
            ))
        }
        None => return Err("Invalid multiaddr. Segment 1 missing".to_string()),
    };

    let port = match addr_parts.next() {
        Some(Protocol::Udp(port)) => port,
        Some(proto) => {
            return Err(format!(
                "Invalid multiaddr. Segment 2 expected protocol 'Udp' but found '{proto}'"
            ))
        }
        None => return Err("Invalid multiaddr. Segment 2 missing".to_string()),
    };

    Ok(SocketAddr::new(addr, port))
}
/// Build the QUIC `Multiaddr` (`/ipX/<addr>/udp/<port>/quic-v1`) for a socket address.
#[must_use]
pub fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr {
    let ip_segment = match m {
        SocketAddr::V4(v4) => Protocol::Ip4(*v4.ip()),
        SocketAddr::V6(v6) => Protocol::Ip6(*v6.ip()),
    };

    let mut addr = Multiaddr::empty();
    addr.push(ip_segment);
    addr.push(Protocol::Udp(m.port()));
    addr.push(Protocol::QuicV1);
    addr
}

View file

@ -1,6 +1,6 @@
[package]
name = "sd-p2p"
version = "0.1.0"
name = "sd-p2p2"
version = "0.2.0"
description = "Rust Peer to Peer Networking Library"
authors = ["Oscar Beaumont <oscar@otbeaumont.me>"]
readme = "README.md"
@ -8,17 +8,16 @@ license = { workspace = true }
repository = { workspace = true }
edition = { workspace = true }
# TODO: Remove features??? and dependencies
[features]
default = []
serde = []
specta = []
[dependencies]
base64 = { workspace = true }
pin-project-lite = { workspace = true }
serde = { workspace = true, features = [
"derive",
] } # TODO: Optional or remove feature
serde = { workspace = true, features = ["derive"] }
specta = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = [
@ -41,14 +40,18 @@ if-watch = { version = "=3.2.0", features = [
] } # Override the features of if-watch which is used by libp2p-quic
libp2p = { version = "0.53.2", features = ["tokio", "serde"] }
libp2p-quic = { version = "0.10.2", features = ["tokio"] }
libp2p-stream = "0.1.0-alpha"
mdns-sd = "0.10.3"
rand_core = { version = "0.6.4" }
streamunordered = "0.5.3"
zeroize = { version = "1.7.0", features = ["derive"] }
base91 = "0.1.0"
sha256 = "1.5.0"
stable-vec = "0.4.0"
hash_map_diff = "0.2.0"
sync_wrapper = "0.1.2"
[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread"] }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
tracing-subscriber = { version = "0.3.18" }
uuid = { workspace = true, features = ["v4"] }

130
crates/p2p2/src/hooks.rs Normal file
View file

@ -0,0 +1,130 @@
use std::{collections::HashSet, fmt, net::SocketAddr, sync::Arc};
use flume::Sender;
use tokio::sync::oneshot;
use crate::{Peer, RemoteIdentity};
/// Events broadcast to every registered hook so it can react to P2P state changes.
#[derive(Debug, Clone)]
pub enum HookEvent {
    /// `P2P::service` has changed
    MetadataModified,

    /// A new listener was registered with the P2P system.
    ListenerRegistered(ListenerId),
    /// A listener's address was added.
    ListenerAddrAdded(ListenerId, SocketAddr),
    /// A listener's address was removed.
    ListenerAddrRemoved(ListenerId, SocketAddr),
    /// A listener was unregistered from the P2P system.
    ListenerUnregistered(ListenerId),

    /// A peer was inserted into `P2P::peers`
    /// This peer could have connected to or have been discovered by a hook.
    PeerAvailable(Arc<Peer>),
    /// A peer was removed from `P2P::peers`
    /// This is due to it no longer being discovered, containing no active connections or available connection methods.
    PeerUnavailable(RemoteIdentity),

    /// A peer was discovered by a hook
    /// This will fire for *every peer* per every *hook* that discovers it.
    PeerDiscoveredBy(HookId, Arc<Peer>),
    /// A hook expired a peer
    /// This will fire for *every peer* per every *hook* that discovers it.
    PeerExpiredBy(HookId, RemoteIdentity),

    /// "Connections" are an internal concept to the P2P library but they will be automatically triggered by `Peer::new_stream`.
    /// They are a concept users of the application may care about so they are exposed here.
    /// A new listener established a connection with a peer
    PeerConnectedWith(ListenerId, Arc<Peer>),
    /// A connection closed with a peer.
    PeerDisconnectedWith(ListenerId, RemoteIdentity),

    /// Your hook or the P2P system was told to shutdown.
    Shutdown {
        // We can detect when this guard is dropped, it doesn't need to be used.
        _guard: ShutdownGuard,
    },
}
/// Guard used to signal when a hook has finished its shutdown work.
/// Dropping it (on the hook side) notifies the waiting `P2P::unregister_hook`.
#[derive(Debug)]
pub struct ShutdownGuard(pub(crate) Option<oneshot::Sender<()>>);

impl ShutdownGuard {
    pub(crate) fn new() -> (Self, oneshot::Receiver<()>) {
        let (tx, rx) = oneshot::channel();
        (Self(Some(tx)), rx)
    }
}

impl Drop for ShutdownGuard {
    fn drop(&mut self) {
        // Best-effort: the receiver may already be gone (e.g. the 2s timeout elapsed).
        if let Some(tx) = self.0.take() {
            let _ = tx.send(());
        }
    }
}

impl Clone for ShutdownGuard {
    // Clones intentionally carry no sender: only the original guard's drop
    // signals completion, so cloning a `HookEvent` can't double-signal.
    fn clone(&self) -> Self {
        Self(None)
    }
}
/// Identifier of a registered hook (its slot within `P2P::hooks`).
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct HookId(pub(crate) usize);

/// Identifier of a registered listener. Listeners share the hook id space,
/// hence the lossless conversion below.
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct ListenerId(pub(crate) usize);

impl From<ListenerId> for HookId {
    fn from(value: ListenerId) -> Self {
        Self(value.0)
    }
}
/// Internal record of a registered hook.
#[derive(Debug)]
pub(crate) struct Hook {
    /// A name used for debugging purposes.
    pub(crate) name: &'static str,
    /// A channel to send events to the hook.
    /// The hook's implementation is responsible for receiving events from this channel.
    pub(crate) tx: Sender<HookEvent>,
    /// If this hook is a listener this will be set.
    pub(crate) listener: Option<ListenerData>,
}

impl Hook {
    /// Deliver an event to this hook, ignoring a disconnected receiver.
    pub fn send(&self, event: HookEvent) {
        let _ = self.tx.send(event);
    }

    /// Invoke the listener's acceptor callback, if this hook is a listener.
    pub fn acceptor(&self, id: ListenerId, peer: &Arc<Peer>, addrs: &HashSet<SocketAddr>) {
        if let Some(listener) = &self.listener {
            (listener.acceptor.0)(id, peer, addrs);
        }
    }
}
/// Extra state carried by hooks that are listeners.
#[derive(Debug)]
pub(crate) struct ListenerData {
    /// The addresses the listener is bound to.
    /// These will be advertised by any discovery methods attached to the P2P system.
    pub addrs: HashSet<SocketAddr>,
    /// This is a function instead of a channel because we need to ensure the code runs prior to the peer being emitted to the application.
    /// If not the peer would have no registered way to connect to it initially which would be confusing.
    #[allow(clippy::type_complexity)]
    pub acceptor:
        HandlerFn<Arc<dyn Fn(ListenerId, &Arc<Peer>, &HashSet<SocketAddr>) + Send + Sync>>,
}

/// A little wrapper for functions to make them `Debug`.
#[derive(Clone)]
pub(crate) struct HandlerFn<F>(pub(crate) F);

impl<F> fmt::Debug for HandlerFn<F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "HandlerFn")
    }
}

View file

@ -1,3 +1,5 @@
// TODO: Document all types in this file
use std::{
hash::{Hash, Hasher},
str::FromStr,
@ -177,7 +179,7 @@ pub enum IdentityOrRemoteIdentityErr {
InvalidFormat,
}
/// TODO
/// TODO: Remove this. I think it make security issues far too easy.
#[derive(Debug, PartialEq)]
pub enum IdentityOrRemoteIdentity {
Identity(Identity),

24
crates/p2p2/src/lib.rs Normal file
View file

@ -0,0 +1,24 @@
//! Rust Peer to Peer Networking Library
#![warn(clippy::all, clippy::unwrap_used, clippy::panic)]
pub(crate) mod hooks;
mod identity;
mod mdns;
mod p2p;
mod peer;
mod quic;
mod smart_guards;
mod stream;
pub use hooks::{HookEvent, HookId, ListenerId, ShutdownGuard};
pub use identity::{
Identity, IdentityErr, IdentityOrRemoteIdentity, IdentityOrRemoteIdentityErr, RemoteIdentity,
};
pub use mdns::Mdns;
pub use p2p::{Listener, P2P};
pub use peer::{ConnectionRequest, Peer};
pub use quic::{Libp2pPeerId, QuicTransport};
pub use smart_guards::SmartWriteGuard;
pub use stream::UnicastStream;
pub use flume;

212
crates/p2p2/src/mdns.rs Normal file
View file

@ -0,0 +1,212 @@
use std::{
collections::HashMap, net::SocketAddr, pin::Pin, str::FromStr, sync::Arc, time::Duration,
};
use flume::{bounded, Receiver};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use tokio::time::{sleep_until, Instant, Sleep};
use tracing::{error, trace, warn};
use crate::{HookEvent, HookId, RemoteIdentity, ShutdownGuard, P2P};
/// The time between re-advertising the mDNS service.
const MDNS_READVERTISEMENT_INTERVAL: Duration = Duration::from_secs(60); // Every minute re-advertise

/// Multicast DNS (mDNS) is used for discovery of peers over local networks.
#[derive(Debug)]
pub struct Mdns {
    // Handle back to the owning P2P system.
    p2p: Arc<P2P>,
    // This hook's id, used to unregister the hook on shutdown.
    hook_id: HookId,
}
impl Mdns {
    /// Register an mDNS hook with `p2p` and spawn the background task driving it.
    pub fn spawn(p2p: Arc<P2P>) -> Result<Self, mdns_sd::Error> {
        let (events_tx, events_rx) = bounded(15);
        let hook_id = p2p.register_hook("mdns", events_tx);
        start(p2p.clone(), hook_id, events_rx)?;
        Ok(Self { p2p, hook_id })
    }

    /// Unregister the hook, triggering the background task's shutdown path.
    pub async fn shutdown(self) {
        self.p2p.unregister_hook(self.hook_id).await;
    }
}
/// Mutable state owned by the background mDNS task.
struct State {
    // Id of the mdns hook within the P2P system.
    hook_id: HookId,
    p2p: Arc<P2P>,
    // e.g. `_appname._udp.local.`
    service_domain: String,
    // e.g. `<remote_identity>.<service_domain>`
    service_name: String,
    mdns_daemon: ServiceDaemon,
    // Timer driving the periodic re-advertisement.
    next_mdns_advertisement: Pin<Box<Sleep>>,
}
/// Register the mDNS service browser and spawn the event loop that drives
/// advertisement, discovery and shutdown.
fn start(p2p: Arc<P2P>, hook_id: HookId, rx: Receiver<HookEvent>) -> Result<(), mdns_sd::Error> {
    let service_domain = format!("_{}._udp.local.", p2p.app_name());
    let mut state = State {
        hook_id,
        service_name: format!("{}.{service_domain}", p2p.remote_identity()),
        service_domain,
        p2p,
        mdns_daemon: ServiceDaemon::new()?,
        next_mdns_advertisement: Box::pin(sleep_until(
            Instant::now() + MDNS_READVERTISEMENT_INTERVAL,
        )),
    };
    let mdns_service = state.mdns_daemon.browse(&state.service_domain)?;

    tokio::spawn(async move {
        loop {
            tokio::select! {
                // Re-advertise whenever our metadata or listeners change.
                Ok(event) = rx.recv_async() => match event {
                    HookEvent::MetadataModified | HookEvent::ListenerRegistered(_) | HookEvent::ListenerAddrAdded(_, _) | HookEvent::ListenerAddrRemoved(_, _) | HookEvent::ListenerUnregistered(_) => advertise(&mut state),
                    HookEvent::Shutdown { _guard } => {
                        shutdown(_guard, &mut state);
                        break;
                    },
                    _ => continue,
                },
                // Periodic re-advertisement keeps our records alive on the LAN.
                _ = &mut state.next_mdns_advertisement => advertise(&mut state),
                // Events from the mDNS browser (peers appearing/disappearing).
                Ok(event) = mdns_service.recv_async() => on_event(&state, event)
            };
        }
    });

    Ok(())
}
/// (Re-)register one mDNS service per listening port, advertising every IP
/// bound to that port along with the node's current metadata.
fn advertise(state: &mut State) {
    // Group listener addresses by port, since each `ServiceInfo` carries one port.
    let mut ports_to_service = HashMap::new();
    for addr in state.p2p.listeners().iter().flat_map(|l| l.addrs.clone()) {
        ports_to_service
            .entry(addr.port())
            .or_insert_with(Vec::new)
            .push(addr.ip());
    }

    let meta = state.p2p.metadata().clone();
    for (port, ips) in ports_to_service {
        let service = ServiceInfo::new(
            &state.service_domain,
            &state.p2p.remote_identity().to_string(),
            &state.service_name,
            &*ips,
            port,
            // TODO: If a piece of metadata overflows a DNS record take care of splitting it across multiple.
            Some(meta.clone()),
        )
        .map(|s| s.enable_addr_auto());
        let service = match service {
            Ok(service) => service,
            Err(err) => {
                warn!("error creating mdns service info: {}", err);
                continue;
            }
        };
        trace!("advertising mdns service: {:?}", service);
        match state.mdns_daemon.register(service) {
            Ok(()) => {}
            Err(err) => warn!("error registering mdns service: {}", err),
        }
    }

    // Schedule the next periodic re-advertisement.
    state.next_mdns_advertisement =
        Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL));
}
/// React to a single event from the mDNS browser.
fn on_event(state: &State, event: ServiceEvent) {
    match event {
        // A peer finished resolving: surface it (with its TXT metadata and
        // socket addresses) to the P2P system.
        ServiceEvent::ServiceResolved(info) => {
            let Some(identity) = fullname_to_identity(state, info.get_fullname()) else {
                return;
            };
            state.p2p.clone().discover_peer(
                state.hook_id,
                identity,
                info.get_properties()
                    .iter()
                    .map(|p| (p.key().to_string(), p.val_str().to_string()))
                    .collect(),
                info.get_addresses()
                    .iter()
                    .map(|addr| SocketAddr::new(*addr, info.get_port()))
                    .collect(),
            );
        }
        // A peer's advertisement went away: mark it no longer discovered by us.
        ServiceEvent::ServiceRemoved(_, fullname) => {
            let Some(identity) = fullname_to_identity(state, &fullname) else {
                return;
            };
            if let Some(peer) = state.p2p.peers().get(&identity) {
                peer.undiscover_peer(state.hook_id);
            }
        }
        ServiceEvent::SearchStarted(_)
        | ServiceEvent::SearchStopped(_)
        | ServiceEvent::ServiceFound(_, _) => {}
    }
}
/// Extract the advertiser's `RemoteIdentity` from an mDNS fullname of the
/// form `<identity>.<service_domain>`.
///
/// Returns `None` (with a warning) for malformed names, and silently for our
/// own advertisement so we never discover ourselves.
fn fullname_to_identity(
    State {
        p2p,
        service_domain,
        ..
    }: &State,
    fullname: &str,
) -> Option<RemoteIdentity> {
    // `strip_suffix('.')` replaces the old `&s[0..s.len() - 1]`, which
    // panicked via usize underflow when the stripped prefix was empty
    // (i.e. a fullname exactly equal to the service domain).
    let Some(identity) = fullname
        .strip_suffix(service_domain)
        .and_then(|s| s.strip_suffix('.'))
    else {
        warn!(
            "resolved peer advertising itself with an invalid fullname '{}'",
            fullname
        );
        return None;
    };

    let Ok(identity) = RemoteIdentity::from_str(identity) else {
        warn!("resolved peer advertising itself with an invalid remote identity '{identity}'");
        return None;
    };

    // Prevent discovery of the current peer.
    if identity == p2p.remote_identity() {
        return None;
    }

    Some(identity)
}
/// Unregister our service (sending mDNS goodbye packets) and stop the daemon.
/// `_guard` is dropped at the end, signalling `P2P::unregister_hook` we're done.
fn shutdown(_guard: ShutdownGuard, state: &mut State) {
    if let Ok(chan) = state
        .mdns_daemon
        .unregister(&state.service_name)
        .map_err(|err| {
            error!(
                "error removing mdns service '{}': {err}",
                state.service_name
            );
        }) {
        // Block until the daemon has processed the unregistration.
        let _ = chan.recv();
    };

    // TODO: Without this mDNS is not sending it goodbye packets without a timeout. Try and remove this cause it makes shutdown slow.
    std::thread::sleep(Duration::from_millis(100));

    match state.mdns_daemon.shutdown() {
        Ok(chan) => {
            let _ = chan.recv();
        }
        Err(err) => {
            error!("error shutting down mdns daemon: {err}");
        }
    }
}

386
crates/p2p2/src/p2p.rs Normal file
View file

@ -0,0 +1,386 @@
use std::{
collections::{hash_map::Entry, HashMap, HashSet},
net::SocketAddr,
sync::{Arc, PoisonError, RwLock, RwLockReadGuard},
time::Duration,
};
use flume::Sender;
use hash_map_diff::hash_map_diff;
use libp2p::futures::future::join_all;
use stable_vec::StableVec;
use tokio::{sync::oneshot, time::timeout};
use tracing::info;
use crate::{
hooks::{HandlerFn, Hook, HookEvent, ListenerData, ListenerId, ShutdownGuard},
smart_guards::SmartWriteGuard,
HookId, Identity, Peer, RemoteIdentity, UnicastStream,
};
/// Manager for the entire P2P system.
#[derive(Debug)]
pub struct P2P {
    /// A unique identifier for the application.
    /// This will differentiate between different applications using this same P2P library.
    app_name: &'static str,
    /// The identity of the local node.
    /// This is the public/private keypair used to uniquely identify the node.
    identity: Identity,
    /// The channel is used by the application to handle incoming connections.
    /// Connections are automatically closed when dropped so if the user forgets to subscribe to this that will just happen as expected.
    handler_tx: Sender<UnicastStream>,
    /// Metadata is shared from the local node to the remote nodes.
    /// This will contain information such as the node's name, version, and services we provide.
    metadata: RwLock<HashMap<String, String>>,
    /// A list of all peers known to the P2P system. Be aware a peer could be connected and/or discovered at any time.
    pub(crate) peers: RwLock<HashMap<RemoteIdentity, Arc<Peer>>>,
    /// Hooks can be registered to react to state changes in the P2P system.
    pub(crate) hooks: RwLock<StableVec<Hook>>,
}
impl P2P {
/// Construct a new P2P system.
///
/// # Panics
/// Panics when `app_name` contains characters other than alphanumerics/'-',
/// or is longer than 12 characters (it must fit in DNS service names).
pub fn new(
    app_name: &'static str,
    identity: Identity,
    handler_tx: Sender<UnicastStream>,
) -> Arc<Self> {
    let name_is_valid = app_name
        .chars()
        .all(|c| char::is_alphanumeric(c) || c == '-');
    name_is_valid
        .then_some(())
        .expect("'P2P::new': invalid app_name. Must be alphanumeric or '-' only.");

    #[allow(clippy::panic)]
    if app_name.len() > 12 {
        panic!("'P2P::new': app_name too long. Must be 12 characters or less.");
    }

    Arc::new(P2P {
        app_name,
        identity,
        handler_tx,
        metadata: Default::default(),
        peers: Default::default(),
        hooks: Default::default(),
    })
}
/// The unique identifier for this application.
pub fn app_name(&self) -> &'static str {
    self.app_name
}

/// The identifier of this node that *MUST* be kept secret.
/// This is a private key in crypto terms.
pub fn identity(&self) -> &Identity {
    &self.identity
}

/// The identifier of this node that can be shared.
/// This is a public key in crypto terms.
pub fn remote_identity(&self) -> RemoteIdentity {
    self.identity.to_remote_identity()
}

/// Metadata is shared from the local node to the remote nodes.
/// This will contain information such as the node's name, version, and services we provide.
pub fn metadata(&self) -> RwLockReadGuard<HashMap<String, String>> {
    self.metadata.read().unwrap_or_else(PoisonError::into_inner)
}
/// Mutable access to the metadata map.
/// When the guard drops, a diff is computed and `HookEvent::MetadataModified`
/// is broadcast to every hook — but only if something actually changed.
pub fn metadata_mut(&self) -> SmartWriteGuard<HashMap<String, String>> {
    let lock = self
        .metadata
        .write()
        .unwrap_or_else(PoisonError::into_inner);

    SmartWriteGuard::new(self, lock, |p2p, before, after| {
        let diff = hash_map_diff(&before, after);
        // A no-op write shouldn't wake every hook.
        if diff.updated.is_empty() && diff.removed.is_empty() {
            return;
        }

        p2p.hooks
            .read()
            .unwrap_or_else(PoisonError::into_inner)
            .iter()
            .for_each(|(_, hook)| {
                hook.send(HookEvent::MetadataModified);
            });
    })
}
/// A list of all peers known to the P2P system. Be aware a peer could be connected and/or discovered at any time.
pub fn peers(&self) -> RwLockReadGuard<HashMap<RemoteIdentity, Arc<Peer>>> {
    self.peers.read().unwrap_or_else(PoisonError::into_inner)
}

// TODO: Should this take `addrs`???, A connection through the Relay probs doesn't have one in the same form.
/// Record that hook `hook_id` discovered peer `identity` at `addrs`.
/// Inserts the peer if new, merges `metadata`, gives every listener a chance
/// to register a connection method, then broadcasts the discovery events.
pub fn discover_peer(
    self: Arc<Self>,
    hook_id: HookId,
    identity: RemoteIdentity,
    metadata: HashMap<String, String>,
    addrs: HashSet<SocketAddr>,
) -> Arc<Peer> {
    let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner);
    let peer = peers.entry(identity);
    let was_peer_inserted = matches!(peer, Entry::Vacant(_));
    let peer = peer
        .or_insert_with({
            let p2p = self.clone();
            || Peer::new(identity, p2p)
        })
        .clone();

    {
        // Scoped so the peer's state lock is released before hooks run.
        let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner);
        state.discovered.insert(hook_id, addrs.clone());
    }
    peer.metadata_mut().extend(metadata);

    {
        let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner);

        // Listeners inject connection methods *before* `PeerAvailable` fires,
        // so the application never observes an unconnectable peer.
        hooks
            .iter()
            .for_each(|(id, hook)| hook.acceptor(ListenerId(id), &peer, &addrs));

        if was_peer_inserted {
            hooks
                .iter()
                .for_each(|(_, hook)| hook.send(HookEvent::PeerAvailable(peer.clone())));
        }

        hooks.iter().for_each(|(_, hook)| {
            hook.send(HookEvent::PeerDiscoveredBy(hook_id, peer.clone()))
        });
    }

    peer
}
/// Record that `listener` established connection `stream` with a peer.
/// Inserts the peer if new, merges `metadata`, stores the connection's
/// shutdown channel, broadcasts events, then hands the stream to the application.
pub fn connected_to(
    self: Arc<Self>,
    listener: ListenerId,
    metadata: HashMap<String, String>,
    stream: UnicastStream,
    shutdown_tx: oneshot::Sender<()>,
) -> Arc<Peer> {
    // The handshake already established who we're talking to.
    let identity = stream.remote_identity();

    let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner);
    let peer = peers.entry(identity);
    let was_peer_inserted = matches!(peer, Entry::Vacant(_));
    let peer = peer
        .or_insert_with({
            let p2p = self.clone();
            move || Peer::new(identity, p2p)
        })
        .clone();

    {
        // Scoped so the peer's state lock is released before hooks run.
        let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner);
        state.active_connections.insert(listener, shutdown_tx);
    }
    peer.metadata_mut().extend(metadata);

    {
        let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner);

        if was_peer_inserted {
            hooks
                .iter()
                .for_each(|(_, hook)| hook.send(HookEvent::PeerAvailable(peer.clone())));
        }

        hooks.iter().for_each(|(_, hook)| {
            hook.send(HookEvent::PeerConnectedWith(listener, peer.clone()))
        });
    }

    // Deliver the stream to the application's connection handler.
    let _ = self.handler_tx.send(stream);

    peer
}
/// All active listeners registered with the P2P system.
pub fn listeners(&self) -> Vec<Listener> {
    let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner);

    let mut listeners = Vec::new();
    for (id, hook) in hooks.iter() {
        // Only hooks carrying listener data are listeners.
        if let Some(data) = hook.listener.as_ref() {
            listeners.push(Listener {
                id: ListenerId(id),
                name: hook.name,
                addrs: data.addrs.clone(),
            });
        }
    }
    listeners
}
/// A listener is a special type of hook which is responsible for accepting incoming connections.
///
/// It is expected you call `Self::register_listener_addr` after this to register the addresses you are listening on.
///
/// `acceptor` is called when a peer is discovered, but before it is emitted to the application.
/// This lets you register a connection method if you have one.
pub fn register_listener(
    &self,
    name: &'static str,
    tx: Sender<HookEvent>,
    acceptor: impl Fn(ListenerId, &Arc<Peer>, &HashSet<SocketAddr>) + Send + Sync + 'static,
) -> ListenerId {
    let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner);
    let hook_id = hooks.push(Hook {
        name,
        tx,
        listener: Some(ListenerData {
            addrs: Default::default(),
            acceptor: HandlerFn(Arc::new(acceptor)),
        }),
    });

    // Notify every *other* hook about the new listener.
    hooks.iter().for_each(|(id, hook)| {
        if id == hook_id {
            return;
        }

        hook.send(HookEvent::ListenerRegistered(ListenerId(hook_id)));
    });

    ListenerId(hook_id)
}
/// Record a new address `listener_id` is bound to and broadcast it to all hooks.
pub fn register_listener_addr(&self, listener_id: ListenerId, addr: SocketAddr) {
    let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner);
    if let Some(listener) = hooks
        .get_mut(listener_id.0)
        .and_then(|l| l.listener.as_mut())
    {
        listener.addrs.insert(addr);
    }

    info!("HookEvent::ListenerAddrAdded({listener_id:?}, {addr})");
    hooks.iter().for_each(|(_, hook)| {
        hook.send(HookEvent::ListenerAddrAdded(listener_id, addr));
    });
}

/// Remove an address from `listener_id` and broadcast the removal to all hooks.
pub fn unregister_listener_addr(&self, listener_id: ListenerId, addr: SocketAddr) {
    let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner);
    if let Some(listener) = hooks
        .get_mut(listener_id.0)
        .and_then(|l| l.listener.as_mut())
    {
        listener.addrs.remove(&addr);
    }

    info!("HookEvent::ListenerAddrRemoved({listener_id:?}, {addr})");
    hooks.iter().for_each(|(_, hook)| {
        hook.send(HookEvent::ListenerAddrRemoved(listener_id, addr));
    });
}
// TODO: Probs cleanup return type
/// Snapshot of `(id, name)` for every registered hook (listeners included).
pub fn hooks(&self) -> Vec<(HookId, &'static str)> {
    let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner);
    hooks
        .iter()
        .map(|(id, hook)| (HookId(id), hook.name))
        .collect()
}

/// Register a new hook which can be used to react to state changes in the P2P system.
pub fn register_hook(&self, name: &'static str, tx: Sender<HookEvent>) -> HookId {
    let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner);
    let id = hooks.push(Hook {
        name,
        tx,
        listener: None,
    });
    HookId(id)
}
/// Unregister a hook. This will also call `HookEvent::Shutdown` on the hook.
pub async fn unregister_hook(&self, id: HookId) {
    let mut shutdown_rxs = Vec::new();

    {
        let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner);
        if let Some(hook) = hooks.remove(id.0) {
            // Ask the hook to shut down; the guard signals (or is dropped)
            // once its shutdown code has run.
            let (_guard, rx) = ShutdownGuard::new();
            shutdown_rxs.push(rx);
            hook.send(HookEvent::Shutdown { _guard });

            if hook.listener.is_some() {
                hooks.iter().for_each(|(_, hook)| {
                    hook.send(HookEvent::ListenerUnregistered(ListenerId(id.0)));
                });
            }

            // Strip this hook's connections/discoveries from every peer.
            let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner);
            let mut peers_to_remove = HashSet::new(); // We can't mutate while iterating
            for (identity, peer) in peers.iter_mut() {
                let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner);

                if let Some(active_connection) =
                    state.active_connections.remove(&ListenerId(id.0))
                {
                    // Tell the connection's task to close it.
                    let _ = active_connection.send(());
                }

                state.connection_methods.remove(&ListenerId(id.0));
                state.discovered.remove(&id);

                // Peers that can no longer be reached or seen are dropped entirely.
                if state.connection_methods.is_empty() && state.discovered.is_empty() {
                    peers_to_remove.insert(*identity);
                }
            }
            for identity in peers_to_remove {
                peers.remove(&identity);
            }
        }
    }

    // We rely on the fact that when the oneshot is dropped this will return an error as opposed to hanging.
    // So we can detect when the hooks shutdown code has completed.
    let _ = timeout(Duration::from_secs(2), join_all(shutdown_rxs)).await;
}
/// Shutdown the whole P2P system.
/// This will close all connections and remove all hooks.
pub async fn shutdown(&self) {
    // Snapshot ids first so the lock isn't held across the `await`s below.
    // A read lock suffices (we only iterate), and the previous redundant
    // `.clone()` of the freshly collected `Vec` is gone.
    let hook_ids = self
        .hooks
        .read()
        .unwrap_or_else(PoisonError::into_inner)
        .iter()
        .map(|(id, _)| id)
        .collect::<Vec<_>>();

    for hook_id in hook_ids {
        self.unregister_hook(HookId(hook_id)).await;
    }
}
}
/// A hook that is capable of accepting connections (exposed by `P2P::listeners`).
#[derive(Debug)]
#[non_exhaustive]
pub struct Listener {
    pub id: ListenerId,
    pub name: &'static str,
    // The socket addresses this listener is currently bound to.
    pub addrs: HashSet<SocketAddr>,
}

impl Listener {
    /// Whether this listener is backed by the hook `id` (they share an id space).
    pub fn is_hook_id(&self, id: HookId) -> bool {
        self.id.0 == id.0
    }
}

267
crates/p2p2/src/peer.rs Normal file
View file

@ -0,0 +1,267 @@
use std::{
collections::{HashMap, HashSet},
net::SocketAddr,
sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak},
};
use thiserror::Error;
use tokio::sync::{mpsc, oneshot};
use tracing::warn;
use crate::{HookEvent, HookId, ListenerId, RemoteIdentity, UnicastStream, P2P};
/// A remote node known to the local P2P system.
#[derive(Debug)]
pub struct Peer {
/// RemoteIdentity of the peer.
pub(crate) identity: RemoteIdentity,
/// Information from `P2P::service` on the remote node.
pub(crate) metadata: RwLock<HashMap<String, String>>,
/// We want these states to be locked by the same lock so we can ensure they are consistent.
pub(crate) state: RwLock<State>,
/// A reference back to the P2P system.
/// This is weak so we don't have recursive `Arc`'s that can never be dropped.
pub(crate) p2p: Weak<P2P>,
}
/// Mutable per-peer state, kept behind a single lock for consistency.
#[derive(Debug, Default)]
pub(crate) struct State {
/// Active connections with the remote.
/// The `oneshot::Sender` is used to signal the connection to close.
pub(crate) active_connections: HashMap<ListenerId, oneshot::Sender<()>>,
/// Methods for establishing an active connections with the remote
/// These should be injected by `Listener::acceptor` which is called when a new peer is discovered.
pub(crate) connection_methods: HashMap<ListenerId, mpsc::Sender<ConnectionRequest>>,
/// Methods that have discovered this peer, mapped to the addresses they saw it at.
pub(crate) discovered: HashMap<HookId, HashSet<SocketAddr>>,
}
/// A request to connect to a client.
/// This will be handled by a configured listener hook.
#[derive(Debug)]
#[non_exhaustive]
pub struct ConnectionRequest {
/// Identity of the peer to connect to.
pub to: RemoteIdentity,
/// Candidate addresses the peer was discovered at.
pub addrs: HashSet<SocketAddr>,
/// Channel on which the listener reports the established stream (or an error).
pub tx: oneshot::Sender<Result<UnicastStream, String>>,
}
// TODO: Maybe use this?
// impl State {
// pub(crate) fn needs_removal(&self) -> bool {
// self.discovered.is_empty()
// && self.connection_methods.is_empty()
// && self.active_connections.is_empty()
// }
// }
// Peers are identified solely by their `RemoteIdentity`; metadata and runtime
// state are intentionally ignored for equality.
impl Eq for Peer {}
impl PartialEq for Peer {
fn eq(&self, other: &Self) -> bool {
self.identity == other.identity
}
}
// Internal methods
impl Peer {
/// Construct a peer with empty metadata/state, holding only a weak
/// back-reference to the owning `P2P` (avoids an `Arc` cycle).
pub(crate) fn new(identity: RemoteIdentity, p2p: Arc<P2P>) -> Arc<Self> {
Arc::new(Self {
identity,
metadata: Default::default(),
state: Default::default(),
p2p: Arc::downgrade(&p2p),
})
}
}
// User-facing methods
impl Peer {
pub fn identity(&self) -> RemoteIdentity {
self.identity
}
pub fn metadata(&self) -> RwLockReadGuard<HashMap<String, String>> {
self.metadata.read().unwrap_or_else(PoisonError::into_inner)
}
pub fn metadata_mut(&self) -> RwLockWriteGuard<HashMap<String, String>> {
self.metadata
.write()
.unwrap_or_else(PoisonError::into_inner)
}
pub fn can_connect(&self) -> bool {
!self
.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.connection_methods
.is_empty()
}
pub fn is_connected(&self) -> bool {
!self
.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.active_connections
.is_empty()
}
pub fn active_connections(&self) -> usize {
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.active_connections
.len()
}
pub fn connection_methods(&self) -> HashSet<ListenerId> {
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.connection_methods
.keys()
.copied()
.collect()
}
pub fn discovered_by(&self) -> HashSet<HookId> {
self.state
.read()
.unwrap_or_else(PoisonError::into_inner)
.discovered
.keys()
.copied()
.collect()
}
/// Construct a new Quic stream to the peer.
pub async fn new_stream(&self) -> Result<UnicastStream, NewStreamError> {
let (addrs, connect_tx) = {
let state = self.state.read().unwrap_or_else(PoisonError::into_inner);
let addrs = state
.discovered
.values()
.flatten()
.cloned()
.collect::<HashSet<_>>();
let Some((_id, connect_tx)) = state
.connection_methods
.iter()
.map(|(id, tx)| (*id, tx.clone()))
.next()
else {
return Err(NewStreamError::NoConnectionMethodsAvailable);
};
(addrs, connect_tx)
};
let (tx, rx) = oneshot::channel();
connect_tx
.send(ConnectionRequest {
to: self.identity,
addrs,
tx,
})
.await
.map_err(|err| {
warn!("Failed to send connect request to peer: {}", err);
NewStreamError::EventLoopOffline(err)
})?;
rx.await
.map_err(|err| {
warn!("Failed to receive connect response from peer: {err}");
NewStreamError::ConnectionNeverEstablished(err)
})?
.map_err(|err| {
warn!("Failed to do the thing: {err}");
NewStreamError::Connecting(err)
})
}
}
// Hook-facing methods
impl Peer {
/// Record that `hook` discovered this peer at `addrs` (replaces any previous set).
pub fn hook_discovered(&self, hook: HookId, addrs: HashSet<SocketAddr>) {
// TODO: Emit event maybe???
self.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.discovered
.insert(hook, addrs);
}
/// Register a channel through which `listener` can be asked to connect to this peer.
pub fn listener_available(&self, listener: ListenerId, tx: mpsc::Sender<ConnectionRequest>) {
self.state
.write()
.unwrap_or_else(PoisonError::into_inner)
.connection_methods
.insert(listener, tx);
}
/// Remove `hook_id` as a discovery source; if the peer becomes completely
/// unreachable it is dropped from the P2P system and hooks are notified.
pub fn undiscover_peer(&self, hook_id: HookId) {
let Some(p2p) = self.p2p.upgrade() else {
return;
};
// NOTE(review): the `state` write guard is held while taking `p2p.hooks`
// (read) and `p2p.peers` (write) — relies on a consistent lock order
// across the crate to stay deadlock-free; confirm before reordering.
let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner);
state.discovered.remove(&hook_id);
let hooks = p2p.hooks.read().unwrap_or_else(PoisonError::into_inner);
hooks.iter().for_each(|(_, hook)| {
hook.send(HookEvent::PeerExpiredBy(hook_id, self.identity));
});
if state.connection_methods.is_empty() && state.discovered.is_empty() {
p2p.peers
.write()
.unwrap_or_else(PoisonError::into_inner)
.remove(&self.identity);
hooks.iter().for_each(|(_, hook)| {
hook.send(HookEvent::PeerUnavailable(self.identity));
});
}
}
/// Tear down this listener's connection/connection-method entries; if the
/// peer becomes completely unreachable it is dropped and hooks are notified.
pub fn disconnected_from(&self, listener_id: ListenerId) {
let Some(p2p) = self.p2p.upgrade() else {
return;
};
let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner);
state.connection_methods.remove(&listener_id);
state.active_connections.remove(&listener_id);
let hooks = p2p.hooks.read().unwrap_or_else(PoisonError::into_inner);
hooks.iter().for_each(|(_, hook)| {
hook.send(HookEvent::PeerDisconnectedWith(listener_id, self.identity));
});
if state.connection_methods.is_empty() && state.discovered.is_empty() {
p2p.peers
.write()
.unwrap_or_else(PoisonError::into_inner)
.remove(&self.identity);
hooks.iter().for_each(|(_, hook)| {
hook.send(HookEvent::PeerUnavailable(self.identity));
});
}
}
}
/// Errors returned by `Peer::new_stream`.
#[derive(Debug, Error)]
pub enum NewStreamError {
/// No listener has registered a way to reach this peer.
#[error("No connection methods available for peer")]
NoConnectionMethodsAvailable,
/// The listener's event loop dropped its receiver.
#[error("The event loop is offline")]
EventLoopOffline(mpsc::error::SendError<ConnectionRequest>),
/// The response oneshot was dropped before a result arrived.
#[error("Failed to establish the connection w/ error: {0}")]
ConnectionNeverEstablished(oneshot::error::RecvError),
/// The transport reported a connection failure.
#[error("error connecting to peer: {0}")]
Connecting(String),
}

View file

@ -0,0 +1,4 @@
pub(super) mod transport;
pub(super) mod utils;
pub use transport::{Libp2pPeerId, QuicTransport};

View file

@ -0,0 +1,372 @@
use std::{
collections::{HashMap, HashSet},
convert::Infallible,
net::{Ipv4Addr, Ipv6Addr, SocketAddr},
sync::{Arc, PoisonError, RwLock},
time::Duration,
};
use flume::{bounded, Receiver, Sender};
use libp2p::{
core::muxing::StreamMuxerBox,
futures::{AsyncReadExt, AsyncWriteExt, StreamExt},
swarm::SwarmEvent,
StreamProtocol, Swarm, SwarmBuilder, Transport,
};
use libp2p_stream::Behaviour;
use tokio::{
net::TcpListener,
sync::{mpsc, oneshot},
time::timeout,
};
use tokio_util::compat::FuturesAsyncReadCompatExt;
use tracing::{debug, warn};
use crate::{
identity::REMOTE_IDENTITY_LEN,
quic::utils::{
identity_to_libp2p_keypair, remote_identity_to_libp2p_peerid, socketaddr_to_quic_multiaddr,
},
ConnectionRequest, HookEvent, ListenerId, RemoteIdentity, UnicastStream, P2P,
};
/// libp2p stream protocol identifier negotiated between Spacedrive nodes.
const PROTOCOL: StreamProtocol = StreamProtocol::new("/sdp2p/1");
/// [libp2p::PeerId] for debugging purposes only.
#[derive(Debug)]
pub struct Libp2pPeerId(libp2p::PeerId);
/// Commands sent from `QuicTransport` to its background event loop (`start`).
#[derive(Debug)]
enum InternalEvent {
/// Bind a libp2p listener on `addr`; reply on `result`.
RegisterListener {
id: ListenerId,
// `true` selects the ipv4 listener slot, `false` the ipv6 one.
ipv4: bool,
addr: SocketAddr,
result: oneshot::Sender<Result<(), String>>,
},
/// Tear down the ipv4 or ipv6 listener; reply on `result`.
UnregisterListener {
id: ListenerId,
ipv4: bool,
result: oneshot::Sender<Result<(), String>>,
},
}
/// Transport using Quic to establish a connection between peers.
/// This uses `libp2p` internally.
#[derive(Debug)]
pub struct QuicTransport {
/// Hook/listener id this transport registered with the P2P system.
id: ListenerId,
/// Owning P2P system (used for unregistering on shutdown).
p2p: Arc<P2P>,
/// Channel into the background event loop (see `InternalEvent`).
internal_tx: Sender<InternalEvent>,
}
impl QuicTransport {
/// Spawn the `QuicTransport` and register it with the P2P system.
/// Be aware spawning this does nothing unless you call `Self::set_ipv4_enabled`/`Self::set_ipv6_enabled` to enable the listeners.
// TODO: Error type here
pub fn spawn(p2p: Arc<P2P>) -> Result<(Self, Libp2pPeerId), String> {
let keypair = identity_to_libp2p_keypair(p2p.identity());
let libp2p_peer_id = Libp2pPeerId(keypair.public().to_peer_id());
// `tx`/`rx`: hook events from the P2P system to the event loop.
// `internal_tx`/`internal_rx`: listener register/unregister commands.
// `connect_tx`/`connect_rx`: outbound connection requests from peers.
let (tx, rx) = bounded(15);
let (internal_tx, internal_rx) = bounded(15);
let (connect_tx, connect_rx) = mpsc::channel(15);
let id = p2p.register_listener("libp2p-quic", tx, move |listener_id, peer, _addrs| {
// TODO: I don't love this always being registered. Really it should only show up if the other device is online (do a ping-type thing)???
peer.listener_available(listener_id, connect_tx.clone());
});
// Both builder stages return `Result<_, Infallible>` here, hence the
// `ok(ok(..))` unwrapping (see the free `ok` helper below).
let swarm = ok(ok(SwarmBuilder::with_existing_identity(keypair)
.with_tokio()
.with_other_transport(|keypair| {
libp2p_quic::GenTransport::<libp2p_quic::tokio::Provider>::new(
libp2p_quic::Config::new(keypair),
)
.map(|(p, c), _| (p, StreamMuxerBox::new(c)))
.boxed()
}))
.with_behaviour(|_| Behaviour::new()))
.with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60)))
.build();
tokio::spawn(start(p2p.clone(), id, swarm, rx, internal_rx, connect_rx));
Ok((
Self {
id,
p2p,
internal_tx,
},
libp2p_peer_id,
))
}
// `None` on the port means disabled. Use `0` for random port.
pub async fn set_ipv4_enabled(&self, port: Option<u16>) -> Result<(), String> {
self.setup_listener(
port.map(|p| SocketAddr::from((Ipv4Addr::UNSPECIFIED, p))),
true,
)
.await
}
/// Same contract as `set_ipv4_enabled` but for the IPv6 listener.
pub async fn set_ipv6_enabled(&self, port: Option<u16>) -> Result<(), String> {
self.setup_listener(
port.map(|p| SocketAddr::from((Ipv6Addr::UNSPECIFIED, p))),
false,
)
.await
}
// TODO: Proper error type
/// Register (`Some(addr)`) or unregister (`None`) a listener via the event loop.
async fn setup_listener(&self, addr: Option<SocketAddr>, ipv4: bool) -> Result<(), String> {
let (tx, rx) = oneshot::channel();
let event = if let Some(mut addr) = addr {
if addr.port() == 0 {
// Resolve "any port" by briefly binding a TCP socket and reusing
// the port it was assigned.
// NOTE(review): this is racy (TOCTOU) — the port could be taken
// between dropping the TcpListener and the QUIC bind; confirm acceptable.
#[allow(clippy::unwrap_used)] // TODO: Error handling
addr.set_port(
TcpListener::bind(addr)
.await
.unwrap()
.local_addr()
.unwrap()
.port(),
);
}
InternalEvent::RegisterListener {
id: self.id,
ipv4,
addr,
result: tx,
}
} else {
InternalEvent::UnregisterListener {
id: self.id,
ipv4,
result: tx,
}
};
let Ok(_) = self.internal_tx.send(event) else {
return Err("internal channel closed".to_string());
};
rx.await
.map_err(|_| "internal response channel closed".to_string())
.and_then(|r| r)
}
/// Unregister this transport's hook from the P2P system (shuts down the listener).
pub async fn shutdown(self) {
self.p2p.unregister_hook(self.id.into()).await;
}
}
/// Unwrap a `Result` whose error type is `Infallible`.
///
/// The error arm is statically impossible; matching on the uninhabited
/// `Infallible` value proves this to the compiler instead of relying on a
/// runtime `unreachable!()` panic path.
fn ok<T>(v: Result<T, Infallible>) -> T {
	match v {
		Ok(v) => v,
		Err(never) => match never {},
	}
}
/// Background event loop for `QuicTransport`.
///
/// Multiplexes four sources: hook events from the P2P system (`rx`), inbound
/// libp2p streams (`incoming`), swarm events, internal listener commands
/// (`internal_rx`) and outbound connection requests (`connect_rx`).
async fn start(
p2p: Arc<P2P>,
id: ListenerId,
mut swarm: Swarm<Behaviour>,
rx: Receiver<HookEvent>,
internal_rx: Receiver<InternalEvent>,
mut connect_rx: mpsc::Receiver<ConnectionRequest>,
) {
// (libp2p listener id, bound addr) per address family; `None` = disabled.
let mut ipv4_listener = None;
let mut ipv6_listener = None;
let mut control = swarm.behaviour().new_control();
#[allow(clippy::unwrap_used)] // TODO: Error handling
let mut incoming = control.accept(PROTOCOL).unwrap();
// `libp2p::PeerId -> RemoteIdentity` for peers with an open connection;
// needed because a `PeerId` is a hash and can't be reversed.
let map = Arc::new(RwLock::new(HashMap::new()));
loop {
tokio::select! {
// Hook events forwarded by the P2P system.
Ok(event) = rx.recv_async() => match event {
HookEvent::PeerExpiredBy(_, identity) => {
// A discovery source expired this peer: probe it and mark it
// disconnected if it can't be reached within 5s.
println!("CHECKING {:?}", identity); // TODO
let Some(peer) = p2p.peers.read().unwrap_or_else(PoisonError::into_inner).get(&identity).map(Clone::clone) else {
continue;
};
let addrs = {
let state = peer.state.read().unwrap_or_else(PoisonError::into_inner);
state
.discovered
.values()
.flatten()
.cloned()
.collect::<HashSet<_>>()
};
let peer_id = remote_identity_to_libp2p_peerid(&identity);
let mut control = control.clone();
tokio::spawn(async move {
match timeout(Duration::from_secs(5), control.open_stream_with_addrs(
peer_id,
PROTOCOL,
addrs.iter()
.map(socketaddr_to_quic_multiaddr)
.collect()
)).await {
Ok(Ok(_)) => {}
Err(_) | Ok(Err(_)) => peer.disconnected_from(id),
};
});
},
HookEvent::Shutdown { _guard } => {
// Disconnect everyone and drop both listeners; `_guard`'s drop
// signals the P2P system that shutdown handling finished.
let connected_peers = swarm.connected_peers().cloned().collect::<Vec<_>>();
for peer_id in connected_peers {
let _ = swarm.disconnect_peer_id(peer_id);
}
if let Some((id, _)) = ipv4_listener.take() {
let _ = swarm.remove_listener(id);
}
if let Some((id, _)) = ipv6_listener.take() {
let _ = swarm.remove_listener(id);
}
// TODO: We don't break the event loop so libp2p can be polled to keep cleaning up.
// break;
},
_ => {},
},
// Inbound streams: authenticate the sender's claimed identity.
Some((peer_id, mut stream)) = incoming.next() => {
let p2p = p2p.clone();
let map = map.clone();
tokio::spawn(async move {
let mut actual = [0; REMOTE_IDENTITY_LEN];
match stream.read_exact(&mut actual).await {
Ok(_) => {},
Err(e) => {
warn!("Failed to read remote identity with libp2p::PeerId({peer_id:?}): {e:?}");
return;
},
}
let identity = match RemoteIdentity::from_bytes(&actual) {
Ok(i) => i,
Err(e) => {
warn!("Failed to parse remote identity with libp2p::PeerId({peer_id:?}): {e:?}");
return;
},
};
// We need to go `PeerId -> RemoteIdentity` but as `PeerId` is a hash that's impossible.
// So to make this work the connection initiator will send their remote identity.
// It is however untrusted as they could send anything, so we convert it to a PeerId and check it matches the PeerId for this connection.
// If it matches, we are certain they own the private key as libp2p takes care of ensuring the PeerId is trusted.
let remote_identity_peer_id = remote_identity_to_libp2p_peerid(&identity);
if peer_id != remote_identity_peer_id {
warn!("Derived remote identity '{remote_identity_peer_id:?}' does not match libp2p::PeerId({peer_id:?})");
return;
}
map.write().unwrap_or_else(PoisonError::into_inner).insert(peer_id, identity);
// TODO: Sync metadata
let metadata = HashMap::new();
let stream = UnicastStream::new(identity, stream.compat());
let (shutdown_tx, shutdown_rx) = oneshot::channel();
p2p.connected_to(
id,
metadata,
stream,
shutdown_tx,
);
debug!("established inbound stream with '{}'", identity);
let _todo = shutdown_rx; // TODO: Handle `shutdown_rx`
});
},
// Swarm events: only the last connection closing matters here.
event = swarm.select_next_some() => if let SwarmEvent::ConnectionClosed { peer_id, num_established: 0, .. } = event {
let Some(identity) = map.write().unwrap_or_else(PoisonError::into_inner).remove(&peer_id) else {
warn!("Tried to remove a peer that wasn't in the map.");
continue;
};
let peers = p2p.peers.read().unwrap_or_else(PoisonError::into_inner);
let Some(peer) = peers.get(&identity) else {
warn!("Tried to remove a peer that wasn't in the P2P system.");
continue;
};
peer.disconnected_from(id);
},
// Listener register/unregister commands from `QuicTransport::setup_listener`.
Ok(event) = internal_rx.recv_async() => match event {
InternalEvent::RegisterListener { id, ipv4, addr, result } => {
match swarm.listen_on(socketaddr_to_quic_multiaddr(&addr)) {
Ok(libp2p_listener_id) => {
let this = match ipv4 {
true => &mut ipv4_listener,
false => &mut ipv6_listener,
};
// TODO: Diff the `addr` & if it's changed actually update it
if this.is_none() {
*this = Some((libp2p_listener_id, addr));
p2p.register_listener_addr(id, addr);
}
let _ = result.send(Ok(()));
},
Err(e) => {
let _ = result.send(Err(e.to_string()));
},
}
},
InternalEvent::UnregisterListener { id, ipv4, result } => {
let this = match ipv4 {
true => &mut ipv4_listener,
false => &mut ipv6_listener,
};
if let Some((addr_id, addr)) = this.take() {
if swarm.remove_listener(addr_id) {
p2p.unregister_listener_addr(id, addr);
}
}
// Unregistering a listener that wasn't registered is a no-op success.
let _ = result.send(Ok(()));
},
},
// Outbound connection requests: dial, then send our identity first
// so the receiver can authenticate us (see inbound arm above).
Some(req) = connect_rx.recv() => {
let mut control = control.clone();
let self_remote_identity = p2p.identity().to_remote_identity();
let map = map.clone();
tokio::spawn(async move {
let peer_id = remote_identity_to_libp2p_peerid(&req.to);
match control.open_stream_with_addrs(
peer_id,
PROTOCOL,
req.addrs.iter()
.map(socketaddr_to_quic_multiaddr)
.collect()
).await {
Ok(mut stream) => {
map.write().unwrap_or_else(PoisonError::into_inner).insert(peer_id, req.to);
match stream.write_all(&self_remote_identity.get_bytes()).await {
Ok(_) => {
debug!("Established outbound stream with '{}'", req.to);
let _ = req.tx.send(Ok(UnicastStream::new(req.to, stream.compat())));
},
Err(e) => {
let _ = req.tx.send(Err(e.to_string()));
},
}
},
Err(e) => {
let _ = req.tx.send(Err(e.to_string()));
},
}
});
}
}
}
}

View file

@ -0,0 +1,38 @@
//! This file contains some fairly meaningless glue code for integrating with libp2p.
use std::net::SocketAddr;
use libp2p::{identity::Keypair, multiaddr::Protocol, Multiaddr, PeerId};
use crate::{Identity, RemoteIdentity};
/// Convert a socket address into the `/ip{4,6}/<ip>/udp/<port>/quic-v1`
/// multiaddr form libp2p expects for QUIC dialing/listening.
#[must_use]
pub(crate) fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr {
	let ip = match m {
		SocketAddr::V4(v4) => Protocol::Ip4(*v4.ip()),
		SocketAddr::V6(v6) => Protocol::Ip6(*v6.ip()),
	};
	let mut out = Multiaddr::empty();
	out.push(ip);
	out.push(Protocol::Udp(m.port()));
	out.push(Protocol::QuicV1);
	out
}
// This is sketchy, but it makes the whole system a lot easier to work with
// We are assuming the libp2p `PublicKey` is the same format as our `RemoteIdentity` type.
// This is *acktually* true but they reserve the right to change it at any point.
#[must_use]
pub fn remote_identity_to_libp2p_peerid(identity: &RemoteIdentity) -> PeerId {
// Both are raw ed25519 public key bytes, so this conversion is lossless today.
let public_key = libp2p::identity::ed25519::PublicKey::try_from_bytes(&identity.get_bytes())
.expect("should be the same format");
PeerId::from_public_key(&public_key.into())
}
// This is sketchy, but it makes the whole system a lot easier to work with
// We are assuming the libp2p `Keypair` is the same format as our `Identity` type.
// This is *acktually* true but they reserve the right to change it at any point.
#[must_use]
pub fn identity_to_libp2p_keypair(identity: &Identity) -> Keypair {
// Both are raw ed25519 secret key bytes, so this conversion is lossless today.
libp2p::identity::Keypair::ed25519_from_bytes(identity.to_bytes())
.expect("should be the same format")
}

View file

@ -0,0 +1,55 @@
use std::{
ops::{Deref, DerefMut},
sync::RwLockWriteGuard,
};
use crate::P2P;
// Callback invoked on drop with the value before and after mutation.
type SaveFn<T> = fn(&P2P, /* before */ T, /* after */ &T);
/// A special guard for `RwLockWriteGuard` that will call a `save` function when it's dropped.
/// This allows changes to the value to automatically trigger `HookEvents` to be emitted.
#[derive(Debug)]
pub struct SmartWriteGuard<'a, T> {
p2p: &'a P2P,
lock: RwLockWriteGuard<'a, T>,
// Snapshot taken at construction; `Option` so `Drop` can move it out.
before: Option<T>,
save: SaveFn<T>,
}
impl<'a, T: Clone> SmartWriteGuard<'a, T> {
	/// Wrap a write guard, snapshotting the current value so `save` can be
	/// handed the before/after pair when the guard is dropped.
	pub(crate) fn new(p2p: &'a P2P, lock: RwLockWriteGuard<'a, T>, save: SaveFn<T>) -> Self {
		let snapshot = lock.clone();
		Self {
			p2p,
			lock,
			before: Some(snapshot),
			save,
		}
	}
}
// Transparent access to the wrapped guard so the smart guard can be used
// exactly like a plain `RwLockWriteGuard`.
impl<'a, T> Deref for SmartWriteGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.lock
}
}
impl<'a, T> DerefMut for SmartWriteGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.lock
}
}
impl<'a, T> Drop for SmartWriteGuard<'a, T> {
// Hand the (before, after) pair to `save` so mutations emit `HookEvents`.
fn drop(&mut self) {
(self.save)(
self.p2p,
// `before` is only taken here and `drop` runs at most once, so the
// `expect` cannot fire in safe code.
self.before
.take()
.expect("'SmartWriteGuard::drop' called more than once!"),
&self.lock,
);
}
}

81
crates/p2p2/src/stream.rs Normal file
View file

@ -0,0 +1,81 @@
use std::{
fmt, io,
pin::Pin,
task::{Context, Poll},
};
use sync_wrapper::SyncWrapper;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf};
use crate::RemoteIdentity;
// Object-safe alias so `UnicastStream` can box any duplex byte stream.
trait IoStream: AsyncRead + AsyncWrite {}
impl<S: AsyncRead + AsyncWrite> IoStream for S {}
/// A unicast stream is a direct stream to a specific peer.
pub struct UnicastStream {
// `SyncWrapper` makes the boxed (non-`Sync`) stream `Sync` by forbidding shared access.
io: SyncWrapper<Pin<Box<dyn IoStream + Send>>>,
// Identity of the peer on the other end.
remote: RemoteIdentity,
}
impl fmt::Debug for UnicastStream {
	// `io` is an opaque trait object, so only the remote identity is printed.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let mut out = f.debug_struct("UnicastStream");
		out.field("remote", &self.remote);
		out.finish()
	}
}
impl UnicastStream {
/// Wrap an established duplex stream to `remote`.
pub fn new<S: AsyncRead + AsyncWrite + Send + 'static>(remote: RemoteIdentity, io: S) -> Self {
Self {
io: SyncWrapper::new(Box::pin(io)),
remote,
}
}
/// Identity of the peer on the other end of this stream.
#[must_use]
pub fn remote_identity(&self) -> RemoteIdentity {
self.remote
}
/// Flush and shut down the underlying stream, consuming it.
pub async fn close(self) -> Result<(), io::Error> {
self.io.into_inner().shutdown().await
}
}
// Delegate reads straight through to the boxed inner stream.
impl AsyncRead for UnicastStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().io)
.get_pin_mut()
.poll_read(cx, buf)
}
}
// Delegate writes/flush/shutdown straight through to the boxed inner stream.
impl AsyncWrite for UnicastStream {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.get_mut().io)
.get_pin_mut()
.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().io)
.get_pin_mut()
.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
Pin::new(&mut self.get_mut().io)
.get_pin_mut()
.poll_shutdown(cx)
}
}

View file

@ -1,15 +1,24 @@
import { useBridgeQuery, useCache, useConnectedPeers, useNodes } from '@sd/client';
import {
useBridgeMutation,
useBridgeQuery,
useCache,
useConnectedPeers,
useDiscoveredPeers,
useNodes
} from '@sd/client';
import { Button, toast } from '@sd/ui';
export const Component = () => {
const node = useBridgeQuery(['nodeState']);
return (
<div className="p-4">
{node.data?.p2p_enabled === false ? (
{/* {node.data?.p2p_enabled === false ? (
<h1 className="text-red-500">P2P is disabled. Please enable it in settings!</h1>
) : (
<Page />
)}
)} */}
<Page />
</div>
);
};
@ -20,19 +29,46 @@ function Page() {
});
const result = useBridgeQuery(['library.list']);
const connectedPeers = useConnectedPeers();
const discoveredPeers = useDiscoveredPeers();
useNodes(result.data?.nodes);
const libraries = useCache(result.data?.items);
const debugConnect = useBridgeMutation(['p2p.debugConnect'], {
onSuccess: () => {
toast.success('Connected!');
},
onError: (e) => {
toast.error(`Error connecting '${e.message}'`);
}
});
return (
<div className="flex flex-col space-y-8">
<div>
<h1 className="mt-4">Connected to:</h1>
{connectedPeers.size === 0 && <p className="pl-2">None</p>}
{[...connectedPeers.entries()].map(([id, node]) => (
<div key={id} className="flex space-x-2">
<p>{id}</p>
</div>
))}
<div className="flex justify-around">
<div>
<h1 className="mt-4">Discovered:</h1>
{discoveredPeers.size === 0 && <p className="pl-2">None</p>}
{[...discoveredPeers.entries()].map(([id, _node]) => (
<div key={id} className="flex space-x-2">
<p>{id}</p>
<Button
variant="accent"
onClick={() => debugConnect.mutate(id)}
disabled={debugConnect.isLoading}
>
Connect
</Button>
</div>
))}
</div>
<div>
<h1 className="mt-4">Connected to:</h1>
{connectedPeers.size === 0 && <p className="pl-2">None</p>}
{[...connectedPeers.entries()].map(([id, node]) => (
<div key={id} className="flex space-x-2">
<p>{id}</p>
</div>
))}
</div>
</div>
<div>

View file

@ -53,9 +53,9 @@ export const Component = () => {
schema: z
.object({
name: z.string().min(1).max(250).optional(),
p2p_enabled: z.boolean().optional(),
p2p_port: u16,
customOrDefault: z.enum(['Custom', 'Default']),
// p2p_enabled: z.boolean().optional(),
// p2p_port: u16,
// customOrDefault: z.enum(['Custom', 'Default']),
image_labeler_version: z.string().optional(),
background_processing_percentage: z.coerce
.number({
@ -69,25 +69,28 @@ export const Component = () => {
reValidateMode: 'onChange',
defaultValues: {
name: node.data?.name,
p2p_port: node.data?.p2p_port || 0,
p2p_enabled: node.data?.p2p_enabled,
customOrDefault: node.data?.p2p_port ? 'Custom' : 'Default',
// p2p_port: node.data?.p2p_port || 0,
// p2p_enabled: node.data?.p2p_enabled,
// customOrDefault: node.data?.p2p_port ? 'Custom' : 'Default',
image_labeler_version: node.data?.image_labeler_version ?? undefined,
background_processing_percentage:
node.data?.preferences.thumbnailer.background_processing_percentage || 50
}
});
const watchCustomOrDefault = form.watch('customOrDefault');
const watchP2pEnabled = form.watch('p2p_enabled');
// const watchCustomOrDefault = form.watch('customOrDefault');
// const watchP2pEnabled = form.watch('p2p_enabled');
const watchBackgroundProcessingPercentage = form.watch('background_processing_percentage');
useDebouncedFormWatch(form, async (value) => {
if (await form.trigger()) {
await editNode.mutateAsync({
name: value.name || null,
p2p_port: value.customOrDefault === 'Default' ? 0 : Number(value.p2p_port),
p2p_enabled: value.p2p_enabled ?? null,
p2p_ipv4_port: null,
p2p_ipv6_port: null,
p2p_discovery: null,
// p2p_port: value.customOrDefault === 'Default' ? 0 : Number(value.p2p_port),
// p2p_enabled: value.p2p_enabled ?? null,
image_labeler_version: value.image_labeler_version ?? null
});
@ -101,11 +104,11 @@ export const Component = () => {
node.refetch();
});
form.watch((data) => {
if (Number(data.p2p_port) > 65535) {
form.setValue('p2p_port', 65535);
}
});
// form.watch((data) => {
// if (Number(data.p2p_port) > 65535) {
// form.setValue('p2p_port', 65535);
// }
// });
const { t } = useLocale();
@ -124,13 +127,13 @@ export const Component = () => {
<NodePill>
{connectedPeers.size} {t('peers')}
</NodePill>
{node.data?.p2p_enabled === true ? (
{/* {node.data?.p2p_enabled === true ? (
<NodePill className="!bg-accent text-white">
{t('running')}
</NodePill>
) : (
<NodePill className="text-white">{t('disabled')}</NodePill>
)}
)} */}
</div>
</div>
@ -321,11 +324,12 @@ export const Component = () => {
{/* TODO: Switch doesn't handle optional fields correctly */}
<Switch
size="md"
checked={watchP2pEnabled || false}
onClick={() => form.setValue('p2p_enabled', !form.getValues('p2p_enabled'))}
// checked={watchP2pEnabled || false}
// onClick={() => form.setValue('p2p_enabled', !form.getValues('p2p_enabled'))}
disabled
/>
</Setting>
<Setting
{/* <Setting
mini
title={t('networking_port')}
description={t('networking_port_description')}
@ -367,7 +371,7 @@ export const Component = () => {
}}
/>
</div>
</Setting>
</Setting> */}
</div>
</FormProvider>
);

View file

@ -3,47 +3,47 @@ import { useBridgeQuery, useFeatureFlag, useP2PEvents, withFeatureFlag } from '@
import { toast } from '@sd/ui';
export function useP2PErrorToast() {
const nodeState = useBridgeQuery(['nodeState']);
const [didShowError, setDidShowError] = useState({
ipv4: false,
ipv6: false
});
// const nodeState = useBridgeQuery(['nodeState']);
// const [didShowError, setDidShowError] = useState({
// ipv4: false,
// ipv6: false
// });
// TODO: This can probably be improved in the future. Theoretically if you enable -> disable -> then enable and it fails both enables the error won't be shown.
useEffect(() => {
const ipv4Error =
(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv4.status === 'Error') || false;
const ipv6Error =
(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv6.status === 'Error') || false;
// // TODO: This can probably be improved in the future. Theoretically if you enable -> disable -> then enable and it fails both enables the error won't be shown.
// useEffect(() => {
// const ipv4Error =
// (nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv4.status === 'Error') || false;
// const ipv6Error =
// (nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv6.status === 'Error') || false;
if (!didShowError.ipv4 && ipv4Error)
toast.error(
{
title: 'Error starting up P2P!',
body: 'Error creating the IPv4 listener. Please check your firewall settings!'
},
{
id: 'ipv4-listener-error'
}
);
// if (!didShowError.ipv4 && ipv4Error)
// toast.error(
// {
// title: 'Error starting up P2P!',
// body: 'Error creating the IPv4 listener. Please check your firewall settings!'
// },
// {
// id: 'ipv4-listener-error'
// }
// );
if (!didShowError.ipv6 && ipv6Error)
toast.error(
{
title: 'Error starting up P2P!',
body: 'Error creating the IPv6 listener. Please check your firewall settings!'
},
{
id: 'ipv6-listener-error'
}
);
// if (!didShowError.ipv6 && ipv6Error)
// toast.error(
// {
// title: 'Error starting up P2P!',
// body: 'Error creating the IPv6 listener. Please check your firewall settings!'
// },
// {
// id: 'ipv6-listener-error'
// }
// );
setDidShowError({
ipv4: ipv4Error,
ipv6: ipv6Error
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [nodeState.data]);
// setDidShowError({
// ipv4: ipv4Error,
// ipv6: ipv6Error
// });
// // eslint-disable-next-line react-hooks/exhaustive-deps
// }, [nodeState.data]);
return null;
}

View file

@ -112,6 +112,7 @@ export type Procedures = {
{ key: "nodes.updateThumbnailerPreferences", input: UpdateThumbnailerPreferences, result: null } |
{ key: "p2p.acceptSpacedrop", input: [string, string | null], result: null } |
{ key: "p2p.cancelSpacedrop", input: string, result: null } |
{ key: "p2p.debugConnect", input: RemoteIdentity, result: string } |
{ key: "p2p.spacedrop", input: SpacedropArgs, result: string } |
{ key: "preferences.update", input: LibraryArgs<LibraryPreferences>, result: null } |
{ key: "search.saved.create", input: LibraryArgs<{ name: string; search?: string | null; filters?: string | null; description?: string | null; icon?: string | null }>, result: null } |
@ -160,7 +161,7 @@ export type CacheNode = { __type: string; __id: string; "#node": any }
export type CameraData = { device_make: string | null; device_model: string | null; color_space: string | null; color_profile: ColorProfile | null; focal_length: number | null; shutter_speed: number | null; flash: Flash | null; orientation: Orientation; lens_make: string | null; lens_model: string | null; bit_depth: number | null; red_eye: boolean | null; zoom: number | null; iso: number | null; software: string | null; serial_number: string | null; lens_serial_number: string | null; contrast: number | null; saturation: number | null; sharpness: number | null; composite: Composite | null }
export type ChangeNodeNameArgs = { name: string | null; p2p_port: MaybeUndefined<number>; p2p_enabled: boolean | null; image_labeler_version: string | null }
export type ChangeNodeNameArgs = { name: string | null; p2p_ipv4_port: Port | null; p2p_ipv6_port: Port | null; p2p_discovery: P2PDiscoveryState | null; image_labeler_version: string | null }
export type CloudInstance = { id: string; uuid: string; identity: RemoteIdentity; nodeId: string; nodeName: string; nodePlatform: number }
@ -247,7 +248,7 @@ export type FileDeleterJobInit = { location_id: number; file_path_ids: number[]
export type FileEraserJobInit = { location_id: number; file_path_ids: number[]; passes: string }
export type FilePath = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null }
export type FilePath = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; key_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null }
export type FilePathCursor = { isDir: boolean; variant: FilePathCursorVariant }
@ -261,7 +262,7 @@ export type FilePathOrder = { field: "name"; value: SortOrder } | { field: "size
export type FilePathSearchArgs = { take?: number | null; orderAndPagination?: OrderAndPagination<number, FilePathOrder, FilePathCursor> | null; filters?: SearchFilterArgs[]; groupDirectories?: boolean }
export type FilePathWithObject = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null; object: { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } | null }
export type FilePathWithObject = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; key_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null; object: { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } | null }
export type Flash = {
/**
@ -397,7 +398,7 @@ export type LibraryPreferences = { location?: { [key in string]: LocationSetting
export type LightScanArgs = { location_id: number; sub_path: string }
export type ListenerStatus = { status: "Disabled" } | { status: "Enabling" } | { status: "Listening"; port: number } | { status: "Error"; error: string }
export type Listener2 = { id: string; name: string; addrs: string[] }
export type Location = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; instance_id: number | null }
@ -446,7 +447,7 @@ id: string;
/**
* name is the display name of the current node. This is set by the user and is shown in the UI. // TODO: Length validation so it can fit in DNS record
*/
name: string; p2p_enabled: boolean; p2p_port: number | null; features: BackendFeature[]; preferences: NodePreferences; image_labeler_version: string | null }) & { data_path: string; p2p: P2PStatus; device_model: string | null }
name: string; identity: RemoteIdentity; p2p_ipv4_port: Port; p2p_ipv6_port: Port; p2p_discovery: P2PDiscoveryState; features: BackendFeature[]; preferences: NodePreferences; image_labeler_version: string | null }) & { data_path: string; listeners: Listener2[]; device_model: string | null }
export type NonIndexedPathItem = { path: string; name: string; extension: string; kind: number; is_dir: boolean; date_created: string; date_modified: string; size_in_bytes_bytes: number[]; hidden: boolean }
@ -507,17 +508,19 @@ export type OrderAndPagination<TId, TOrder, TCursor> = { orderOnly: TOrder } | {
export type Orientation = "Normal" | "CW90" | "CW180" | "CW270" | "MirroredVertical" | "MirroredHorizontal" | "MirroredHorizontalAnd90CW" | "MirroredHorizontalAnd270CW"
export type P2PDiscoveryState = "Everyone" | "ContactsOnly" | "Disabled"
/**
* TODO: P2P event for the frontend
*/
export type P2PEvent = { type: "DiscoveredPeer"; identity: RemoteIdentity; metadata: PeerMetadata } | { type: "ExpiredPeer"; identity: RemoteIdentity } | { type: "ConnectedPeer"; identity: RemoteIdentity } | { type: "DisconnectedPeer"; identity: RemoteIdentity } | { type: "SpacedropRequest"; id: string; identity: RemoteIdentity; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedout"; id: string } | { type: "SpacedropRejected"; id: string }
export type P2PStatus = { ipv4: ListenerStatus; ipv6: ListenerStatus }
export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: HardwareModel | null; version: string | null }
export type PlusCode = string
export type Port = null | number
export type Range<T> = { from: T } | { to: T }
/**

View file

@ -63,6 +63,10 @@ export function P2PContextProvider({ children }: PropsWithChildren) {
);
}
export function useP2PContextRaw() {
return useContext(Context);
}
export function useDiscoveredPeers() {
return useContext(Context).discoveredPeers;
}

View file

@ -4731,10 +4731,6 @@ packages:
peerDependencies:
'@effect-ts/otel-node': '*'
peerDependenciesMeta:
'@effect-ts/core':
optional: true
'@effect-ts/otel':
optional: true
'@effect-ts/otel-node':
optional: true
dependencies: