diff --git a/.gitignore b/.gitignore index 443c90e76..197b77ae6 100644 --- a/.gitignore +++ b/.gitignore @@ -82,3 +82,4 @@ spacedrive .cargo/config.toml .github/scripts/deps .vite-inspect +vite.config.ts.* diff --git a/Cargo.lock b/Cargo.lock index 6e67ccba5..72bbfc55d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,11 +76,23 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.12", "once_cell", "version_check", ] +[[package]] +name = "ahash" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "0.7.20" @@ -114,6 +126,12 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "allocator-api2" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -369,9 +387,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.75" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdf6721fb0140e4f897002dd086c06f6c27775df19cfe1fccb21181a48fd2c98" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", @@ -945,9 +963,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64-simd" @@ -1197,7 +1215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "542f33a8835a0884b006a0c3df3dadd99c0c3f296ed26c2fdc8028e01ad6230c" dependencies = [ "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "serde", ] @@ -1389,7 +1407,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3431df59f28accaf4cb4eed4a9acc66bea3f3c3753aa6cdc2f024174ef232af7" dependencies = [ - "smallvec 1.11.2", + "smallvec 1.13.1", ] [[package]] @@ -1398,7 +1416,7 @@ version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" dependencies = [ - "smallvec 1.11.2", + "smallvec 1.13.1", "target-lexicon", ] @@ -1866,7 +1884,7 @@ dependencies = [ "phf 0.8.0", "proc-macro2", "quote", - "smallvec 1.11.2", + "smallvec 1.13.1", "syn 1.0.109", ] @@ -2506,7 +2524,7 @@ dependencies = [ "lebe", "miniz_oxide", "rayon-core", - "smallvec 1.11.2", + "smallvec 1.13.1", "zune-inflate", ] @@ -2759,9 +2777,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2775,8 +2793,7 @@ dependencies = [ [[package]] name = "futures-bounded" version = "0.2.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e2774cc104e198ef3d3e1ff4ab40f86fa3245d6cb6a3a46174f21463cee173" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "futures-timer", "futures-util", @@ -2784,9 +2801,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2802,7 +2819,7 @@ dependencies = [ "futures-core", "pin-project", "slab", - "smallvec 1.11.2", + "smallvec 1.13.1", ] [[package]] @@ -2813,9 +2830,9 @@ checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2825,9 +2842,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -2867,9 +2884,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", @@ -2888,15 +2905,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -2917,9 +2934,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -3050,9 +3067,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "js-sys", 
@@ -3133,7 +3150,7 @@ dependencies = [ "gobject-sys", "libc", "once_cell", - "smallvec 1.11.2", + "smallvec 1.13.1", "thiserror", ] @@ -3177,7 +3194,7 @@ dependencies = [ "aho-corasick 1.1.2", "bstr", "log", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", "serde", ] @@ -3299,13 +3316,19 @@ dependencies = [ "num-traits", ] +[[package]] +name = "hash_map_diff" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff9168f8bc101ee284e882004680c32a048cd8076bc4c134443ff0f5dd302610" + [[package]] name = "hashbrown" version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "ahash", + "ahash 0.7.7", ] [[package]] @@ -3314,7 +3337,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.7", ] [[package]] @@ -3322,6 +3345,10 @@ name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash 0.8.8", + "allocator-api2", +] [[package]] name = "hashlink" @@ -3338,7 +3365,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "headers-core", "http", @@ -3429,7 +3456,7 @@ dependencies = [ "parking_lot 0.12.1", "rand 0.8.5", "resolv-conf", - "smallvec 1.11.2", + "smallvec 1.13.1", "thiserror", "tokio", "tracing", @@ -3554,7 +3581,7 @@ source = "git+https://github.com/spacedriveapp/rspc.git?rev=f3347e2e8bfe3f37bfac dependencies = [ "async-tungstenite", "axum", - "base64 0.21.5", + "base64 0.21.7", "cookie", "form_urlencoded", "futures", @@ -3713,9 +3740,9 @@ dependencies = [ [[package]] name = "igd-next" -version = "0.14.2" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e065e90a518ab5fedf79aa1e4b784e10f8e484a834f6bda85c42633a2cb7af" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" dependencies = [ "async-trait", "attohttpc", @@ -3740,7 +3767,7 @@ dependencies = [ "globset", "log", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "same-file", "walkdir", "winapi-util", @@ -4158,9 +4185,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.153" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libdbus-sys" @@ -4225,14 +4252,13 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" version = "0.53.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681fb3f183edfbedd7a57d32ebe5dcdc0b9f94061185acf3c30249349cc6fc99" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "bytes", "either", "futures", "futures-timer", - "getrandom 0.2.11", + "getrandom 0.2.12", "instant", "libp2p-allow-block-list", 
"libp2p-connection-limits", @@ -4242,7 +4268,7 @@ dependencies = [ "libp2p-identity", "libp2p-kad", "libp2p-mdns", - "libp2p-quic", + "libp2p-quic 0.10.2 (git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c)", "libp2p-swarm", "libp2p-tcp", "libp2p-upnp", @@ -4255,8 +4281,7 @@ dependencies = [ [[package]] name = "libp2p-allow-block-list" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "107b238b794cb83ab53b74ad5dcf7cca3200899b72fe662840cfb52f5b0a32e6" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4267,8 +4292,7 @@ dependencies = [ [[package]] name = "libp2p-connection-limits" version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7cd50a78ccfada14de94cbacd3ce4b0138157f376870f13d3a8422cd075b4fd" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "libp2p-core", "libp2p-identity", @@ -4279,8 +4303,7 @@ dependencies = [ [[package]] name = "libp2p-core" version = "0.41.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8130a8269e65a2554d55131c770bdf4bcd94d2b8d4efb24ca23699be65066c05" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "either", "fnv", @@ -4298,7 +4321,7 @@ dependencies = [ "rand 0.8.5", "rw-stream-sink", "serde", - "smallvec 1.11.2", + "smallvec 1.13.1", "thiserror", "tracing", "unsigned-varint 0.8.0", @@ -4308,8 +4331,7 @@ dependencies = [ [[package]] name = "libp2p-dns" version = "0.41.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d17cbcf7160ff35c3e8e560de4a068fe9d6cb777ea72840e48eb76ff9576c4b6" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "async-trait", "futures", @@ -4317,25 +4339,24 @@ dependencies = [ "libp2p-core", "libp2p-identity", "parking_lot 0.12.1", - "smallvec 1.11.2", + "smallvec 1.13.1", "tracing", ] [[package]] name = "libp2p-gossipsub" version = "0.46.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d665144a616dadebdc5fff186b1233488cdcd8bfb1223218ff084b6d052c94f7" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "asynchronous-codec", - "base64 0.21.5", + "base64 0.21.7", "byteorder", "bytes", "either", "fnv", "futures", "futures-ticker", - "getrandom 0.2.11", + "getrandom 0.2.12", "hex_fmt", "instant", "libp2p-core", @@ -4348,7 +4369,7 @@ dependencies = [ "regex", "serde", "sha2 0.10.8", - "smallvec 1.11.2", + "smallvec 1.13.1", "tracing", "void", ] @@ -4374,9 +4395,8 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.45.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" +version = "0.45.4" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "arrayvec", "asynchronous-codec", @@ 
-4395,7 +4415,7 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "smallvec 1.11.2", + "smallvec 1.13.1", "thiserror", "tracing", "uint", @@ -4405,8 +4425,7 @@ dependencies = [ [[package]] name = "libp2p-mdns" version = "0.45.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49007d9a339b3e1d7eeebc4d67c05dbf23d300b7d091193ec2d3f26802d7faf2" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "data-encoding", "futures", @@ -4416,7 +4435,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "rand 0.8.5", - "smallvec 1.11.2", + "smallvec 1.13.1", "socket2 0.5.5", "tokio", "tracing", @@ -4435,7 +4454,7 @@ dependencies = [ "if-watch", "libp2p-core", "libp2p-identity", - "libp2p-tls", + "libp2p-tls 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.12.1", "quinn", "rand 0.8.5", @@ -4447,11 +4466,47 @@ dependencies = [ "tracing", ] +[[package]] +name = "libp2p-quic" +version = "0.10.2" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls 0.3.0 (git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c)", + "parking_lot 0.12.1", + "quinn", + "rand 0.8.5", + "ring 0.16.20", + "rustls", + "socket2 0.5.5", + "thiserror", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-stream" +version = "0.1.0-alpha" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand 0.8.5", + "tracing", + "void", +] + [[package]] name = "libp2p-swarm" -version = "0.44.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92532fc3c4fb292ae30c371815c9b10103718777726ea5497abc268a4761866" +version = "0.44.2" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "either", "fnv", @@ -4460,10 +4515,11 @@ dependencies = [ "instant", "libp2p-core", "libp2p-identity", + "lru 0.12.2", "multistream-select", "once_cell", "rand 0.8.5", - "smallvec 1.11.2", + "smallvec 1.13.1", "tokio", "tracing", "void", @@ -4472,8 +4528,7 @@ dependencies = [ [[package]] name = "libp2p-tcp" version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b2460fc2748919adff99ecbc1aab296e4579e41f374fb164149bd2c9e529d4c" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "futures", "futures-timer", @@ -4505,11 +4560,28 @@ dependencies = [ "yasna", ] +[[package]] +name = "libp2p-tls" +version = "0.3.0" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring 0.16.20", + "rustls", + "rustls-webpki", + "thiserror", + "x509-parser", + "yasna", +] + [[package]] name = "libp2p-upnp" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "963eb8a174f828f6a51927999a9ab5e45dfa9aa2aa5fed99aa65f79de6229464" +version = "0.2.1" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "futures", "futures-timer", @@ -4620,6 +4692,15 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "lru" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4807,7 +4888,7 @@ version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e52eb6380b6d2a10eb3434aec0885374490f5b82c8aaf5cd487a183c98be834" dependencies = [ - "ahash", + "ahash 0.7.7", "metrics-macros", ] @@ -4817,7 +4898,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "142c53885123b68d94108295a09d4afe1a1388ed95b54d5dacd9a454753030f2" dependencies = [ - "ahash", + "ahash 0.7.7", "metrics-macros", ] @@ -4914,7 +4995,7 @@ dependencies = [ "crossbeam-utils", "dashmap", "skeptic", - "smallvec 1.11.2", + "smallvec 1.13.1", "tagptr", "triomphe", ] @@ -5016,15 +5097,14 @@ dependencies = [ [[package]] name = "multistream-select" version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "bytes", "futures", - "log", "pin-project", - "smallvec 1.11.2", - "unsigned-varint 0.7.2", + "smallvec 1.13.1", + "tracing", + "unsigned-varint 0.8.0", ] [[package]] @@ -5048,7 +5128,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.12", ] [[package]] @@ -5198,7 +5278,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "smallvec 1.11.2", + "smallvec 1.13.1", ] [[package]] @@ -5225,6 +5305,12 @@ dependencies = [ "libc", ] +[[package]] +name = "no-std-compat" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df270209a7f04d62459240d890ecb792714d5db12c92937823574a09930276b4" + [[package]] name = "nodrop" version = "0.1.14" @@ -5768,7 +5854,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.16", - "smallvec 1.11.2", + "smallvec 1.13.1", "winapi", ] @@ -5781,7 +5867,7 @@ dependencies = [ "cfg-if", "libc", "redox_syscall 0.4.1", - "smallvec 1.11.2", + "smallvec 1.13.1", "windows-targets 0.48.5", ] @@ -5859,7 +5945,7 @@ version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b8fcc794035347fb64beda2d3b462595dd2753e3f268d89c5aae77e8cf2c310" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "serde", ] @@ -6066,18 +6152,18 @@ checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", @@ -6140,7 +6226,7 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5699cc8a63d1aa2b1ee8e12b9ad70ac790d65788cd36101fa37f87ea46c4cef" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "indexmap 2.2.1", "line-wrap", "quick-xml", @@ -6446,9 +6532,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c4f1c9d81d556458f94c98f857748130ea9737bbd6053da497503b26ea63c" +checksum = "6f87c10af16e0af74010d2a123d202e8363c04db5acfa91d8747f64a8524da3a" dependencies = [ "dtoa", "itoa 1.0.10", @@ -6597,7 +6683,7 @@ dependencies = [ "futures", "indexmap 1.9.3", "itertools 0.10.5", - "lru", + "lru 0.7.8", "once_cell", "opentelemetry", "petgraph", @@ -6653,8 +6739,7 @@ dependencies = [ [[package]] name = "quick-protobuf-codec" version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "asynchronous-codec", "bytes", @@ -6833,7 +6918,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.12", ] [[package]] @@ -6946,20 +7031,20 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.12", "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.10.2" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick 1.1.2", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.5", "regex-syntax 0.8.2", ] @@ -6974,9 +7059,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick 1.1.2", "memchr", @@ -7034,7 +7119,7 @@ version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", @@ -7161,7 +7246,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" dependencies = [ "cc", - "getrandom 0.2.11", + "getrandom 0.2.12", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -7257,7 +7342,7 @@ dependencies = [ "hashlink", "libsqlite3-sys", "memchr", - "smallvec 1.11.2", + "smallvec 1.13.1", ] [[package]] @@ -7347,7 +7432,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] @@ -7374,7 +7459,7 @@ checksum = "71cd15fef9112a1f94ac64b58d1e4628192631ad6af4dc69997f995459c874e7" dependencies = [ "bitflags 1.3.2", "bytemuck", - "smallvec 1.11.2", + "smallvec 1.13.1", "ttf-parser", "unicode-bidi-mirroring", "unicode-ccc", @@ -7385,8 +7470,7 @@ dependencies = [ [[package]] name = "rw-stream-sink" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +source = "git+https://github.com/spacedriveapp/rust-libp2p.git?rev=a005656df7e82059a0eb2e333ebada4731d23f8c#a005656df7e82059a0eb2e333ebada4731d23f8c" dependencies = [ "futures", "pin-project", @@ -7570,10 +7654,10 @@ dependencies = [ name = "sd-cloud-api" version = "0.1.0" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "reqwest", "rspc", - "sd-p2p", + "sd-p2p2", "serde", "serde_json", "specta", @@ -7594,7 +7678,7 @@ dependencies = [ "aws-credential-types", "aws-sdk-s3", "axum", - "base64 0.21.5", + "base64 0.21.7", "base91", "blake3", "bytes", @@ -7636,7 +7720,10 @@ dependencies = [ "sd-file-path-helper", "sd-images", "sd-media-metadata", - "sd-p2p", + "sd-p2p-block", + "sd-p2p-proto", + "sd-p2p-tunnel", + "sd-p2p2", "sd-prisma", "sd-sync", "sd-utils", @@ -7891,24 +7978,58 @@ dependencies = [ ] [[package]] -name = "sd-p2p" +name = "sd-p2p-block" version = "0.1.0" dependencies = [ - "base64 0.21.5", + "sd-p2p-proto", + "sd-p2p2", + "thiserror", + "tokio", + "tracing", + "uuid", +] + +[[package]] +name = "sd-p2p-proto" +version = "0.1.0" +dependencies = [ + "ed25519-dalek", + "thiserror", + "tokio", + "uuid", +] + +[[package]] +name = "sd-p2p-tunnel" +version = "0.1.0" +dependencies = [ + "sd-p2p2", + "tokio", +] + +[[package]] +name = "sd-p2p2" +version = "0.2.0" +dependencies = [ + "base64 0.21.7", "base91", "ed25519-dalek", "flume 0.11.0", "futures-core", + "hash_map_diff", "if-watch", "libp2p", - "libp2p-quic", + "libp2p-quic 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libp2p-stream", "mdns-sd", "pin-project-lite", "rand_core 0.6.4", "serde", "sha256", "specta", + "stable-vec", "streamunordered", + "sync_wrapper", "thiserror", "tokio", "tokio-stream", @@ -8050,7 +8171,7 @@ dependencies = [ "phf_codegen 0.8.0", "precomputed-hash", "servo_arc", - "smallvec 1.11.2", + "smallvec 1.13.1", "thin-slice", ] @@ -8171,7 +8292,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", @@ -8414,9 +8535,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.2" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" [[package]] name = "socket2" @@ -8643,6 
+8764,15 @@ dependencies = [ "log", ] +[[package]] +name = "stable-vec" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80dfb7bb28f3d2fa50566793349d633b33f938543154be8071610ea9f590d8ca" +dependencies = [ + "no-std-compat", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -8761,7 +8891,7 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bbdb58577b6301f8d17ae2561f32002a5bae056d444e0f69e611e504a276204" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "serde", "serde_json", ] @@ -8971,7 +9101,7 @@ version = "1.5.3" source = "git+https://github.com/spacedriveapp/tauri.git?rev=8409af71a83d631ff9d1cd876c441a57511a1cbd#8409af71a83d631ff9d1cd876c441a57511a1cbd" dependencies = [ "anyhow", - "base64 0.21.5", + "base64 0.21.7", "bytes", "cocoa", "dirs-next", @@ -9047,7 +9177,7 @@ name = "tauri-codegen" version = "1.4.1" source = "git+https://github.com/spacedriveapp/tauri.git?rev=8409af71a83d631ff9d1cd876c441a57511a1cbd#8409af71a83d631ff9d1cd876c441a57511a1cbd" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "brotli", "ico", "json-patch", @@ -9271,18 +9401,18 @@ checksum = "8eaa81235c7058867fa8c0e7314f33dcce9c215f535d1913822a2b3f5e289f3c" [[package]] name = "thiserror" -version = "1.0.51" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.51" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", @@ -9382,9 +9512,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.35.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -9665,7 +9795,7 @@ dependencies = [ "once_cell", "regex", "sharded-slab", - "smallvec 1.11.2", + "smallvec 1.13.1", "thread_local", "tracing", "tracing-core", @@ -9918,7 +10048,7 @@ version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "log", "once_cell", "rustls", @@ -9976,7 +10106,7 @@ version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c51daa774fe9ee5efcf7b4fec13019b8119cda764d9a8b5b06df02bb1445c656" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "log", "pico-args", "usvg-parser", @@ -10058,7 +10188,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e395fcf16a7a3d8127ec99782007af141946b4795001f876d54fb0d55978560" dependencies = [ - "getrandom 0.2.11", + "getrandom 0.2.12", "serde", ] @@ -10993,6 +11123,26 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.48", +] + [[package]] name = "zeroize" version = "1.7.0" diff --git a/Cargo.toml b/Cargo.toml index 6a3396275..f54358834 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,7 +57,7 @@ base64 = "0.21.5" blake3 = "1.5.0" chrono = "0.4.31" clap = "4.4.7" -futures = "0.3.29" +futures = "0.3.30" futures-concurrency = "7.4.3" hex = "0.4.3" http = "0.2.9" @@ -76,7 +76,7 @@ strum = "0.25" strum_macros = "0.25" tempfile = "3.8.1" thiserror = "1.0.50" -tokio = "1.34.0" +tokio = "1.36.0" tokio-stream = "0.1.14" tokio-util = "0.7.10" uhlc = "=0.5.2" @@ -87,13 +87,19 @@ webp = "0.2.6" # Proper IOS Support if-watch = { git = "https://github.com/oscartbeaumont/if-watch.git", rev = "a92c17d3f85c1c6fb0afeeaf6c2b24d0b147e8c3" } -# Beta features +# We hack it to the high heavens rspc = { git = "https://github.com/spacedriveapp/rspc.git", rev = "f3347e2e8bfe3f37bfacc437ca329fe71cdcb048" } # `cursor_position` method tauri = { git = "https://github.com/spacedriveapp/tauri.git", rev = "8409af71a83d631ff9d1cd876c441a57511a1cbd" } tao = { git = "https://github.com/spacedriveapp/tao", rev = "7880adbc090402c44fbcf006669458fa82623403" } +# Add `Control::open_stream_with_addrs` +libp2p = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" } +libp2p-core = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" } +libp2p-swarm = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" } +libp2p-stream = { git = "https://github.com/spacedriveapp/rust-libp2p.git", rev = "a005656df7e82059a0eb2e333ebada4731d23f8c" } + # Set the settings for build scripts and proc-macros. 
[profile.dev.build-override] opt-level = 3 diff --git a/apps/mobile/src/screens/settings/client/GeneralSettings.tsx b/apps/mobile/src/screens/settings/client/GeneralSettings.tsx index 6cfce95cb..2e89489cd 100644 --- a/apps/mobile/src/screens/settings/client/GeneralSettings.tsx +++ b/apps/mobile/src/screens/settings/client/GeneralSettings.tsx @@ -37,8 +37,9 @@ const GeneralSettingsScreen = ({ navigation }: SettingsStackScreenProps<'General {/* Node Name and Port */} Node Name - Node Port - + {/* // TODO: Bring this back */} + {/* Node Port */} + {/* */} {debugState.enabled && ( diff --git a/core/Cargo.toml b/core/Cargo.toml index e59da6b17..b1ed15311 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -38,7 +38,10 @@ sd-images = { path = "../crates/images", features = [ "specta", ] } sd-media-metadata = { path = "../crates/media-metadata" } -sd-p2p = { path = "../crates/p2p", features = ["specta", "serde"] } +sd-p2p2 = { path = "../crates/p2p2", features = ["specta"] } +sd-p2p-block = { path = "../crates/p2p-block" } +sd-p2p-proto = { path = "../crates/p2p-proto" } +sd-p2p-tunnel = { path = "../crates/p2p-tunnel" } sd-prisma = { path = "../crates/prisma" } sd-ai = { path = "../crates/ai", optional = true } sd-sync = { path = "../crates/sync" } diff --git a/core/prisma/migrations/20240221044741_drop_node_peer_id/migration.sql b/core/prisma/migrations/20240221044741_drop_node_peer_id/migration.sql new file mode 100644 index 000000000..deda288c5 --- /dev/null +++ b/core/prisma/migrations/20240221044741_drop_node_peer_id/migration.sql @@ -0,0 +1,33 @@ +/* + Warnings: + + - You are about to drop the column `pub_id` on the `label` table. All the data in the column will be lost. + - You are about to drop the column `node_peer_id` on the `node` table. All the data in the column will be lost. + +*/ +-- RedefineTables +PRAGMA foreign_keys=OFF; +CREATE TABLE "new_label" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "name" TEXT NOT NULL, + "date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + "date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP +); +INSERT INTO "new_label" ("date_created", "date_modified", "id", "name") SELECT "date_created", "date_modified", "id", "name" FROM "label"; +DROP TABLE "label"; +ALTER TABLE "new_label" RENAME TO "label"; +CREATE UNIQUE INDEX "label_name_key" ON "label"("name"); +CREATE TABLE "new_node" ( + "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, + "pub_id" BLOB NOT NULL, + "name" TEXT NOT NULL, + "platform" INTEGER NOT NULL, + "date_created" DATETIME NOT NULL, + "identity" BLOB +); +INSERT INTO "new_node" ("date_created", "id", "identity", "name", "platform", "pub_id") SELECT "date_created", "id", "identity", "name", "platform", "pub_id" FROM "node"; +DROP TABLE "node"; +ALTER TABLE "new_node" RENAME TO "node"; +CREATE UNIQUE INDEX "node_pub_id_key" ON "node"("pub_id"); +PRAGMA foreign_key_check; +PRAGMA foreign_keys=ON; diff --git a/core/prisma/schema.prisma b/core/prisma/schema.prisma index 4052b7a35..73cc986de 100644 --- a/core/prisma/schema.prisma +++ b/core/prisma/schema.prisma @@ -43,7 +43,6 @@ model Node { platform Int date_created DateTime identity Bytes? // TODO: Change to required field in future - node_peer_id String? 
// TODO: Remove as part of - https://linear.app/spacedriveapp/issue/ENG-757/p2p-library-portability @@map("node") } @@ -72,8 +71,8 @@ model Instance { locations Location[] - CRDTOperation CRDTOperation[] - CloudCRDTOperation CloudCRDTOperation[] + CRDTOperation CRDTOperation[] + CloudCRDTOperation CloudCRDTOperation[] @@map("instance") } @@ -360,11 +359,11 @@ model Label { model LabelOnObject { date_created DateTime @default(now()) - object_id Int - object Object @relation(fields: [object_id], references: [id], onDelete: Restrict) + object_id Int + object Object @relation(fields: [object_id], references: [id], onDelete: Restrict) - label_id Int - label Label @relation(fields: [label_id], references: [id], onDelete: Restrict) + label_id Int + label Label @relation(fields: [label_id], references: [id], onDelete: Restrict) @@id([label_id, object_id]) @@map("label_on_object") diff --git a/core/src/api/libraries.rs b/core/src/api/libraries.rs index f98f73b53..6f48b67c3 100644 --- a/core/src/api/libraries.rs +++ b/core/src/api/libraries.rs @@ -9,7 +9,7 @@ use crate::{ use futures::StreamExt; use sd_cache::{Model, Normalise, NormalisedResult, NormalisedResults}; use sd_file_ext::kind::ObjectKind; -use sd_p2p::spacetunnel::RemoteIdentity; +use sd_p2p2::RemoteIdentity; use sd_prisma::prisma::{indexer_rule, object, statistics}; use tokio_stream::wrappers::IntervalStream; diff --git a/core/src/api/mod.rs b/core/src/api/mod.rs index 32e452eb3..903a22641 100644 --- a/core/src/api/mod.rs +++ b/core/src/api/mod.rs @@ -2,14 +2,15 @@ use crate::{ invalidate_query, job::JobProgressEvent, node::{ - config::{NodeConfig, NodePreferences}, + config::{NodeConfig, NodePreferences, P2PDiscoveryState, Port}, get_hardware_model_name, HardwareModel, }, + p2p::{into_listener2, Listener2}, Node, }; use sd_cache::patch_typedef; -use sd_p2p::P2PStatus; +use sd_p2p2::RemoteIdentity; use std::sync::{atomic::Ordering, Arc}; use itertools::Itertools; @@ -93,8 +94,10 @@ pub struct SanitisedNodeConfig { pub id: Uuid, /// name is the display name of the current node. This is set by the user and is shown in the UI. 
// TODO: Length validation so it can fit in DNS record pub name: String, - pub p2p_enabled: bool, - pub p2p_port: Option<u16>, + pub identity: RemoteIdentity, + pub p2p_ipv4_port: Port, + pub p2p_ipv6_port: Port, + pub p2p_discovery: P2PDiscoveryState, pub features: Vec, pub preferences: NodePreferences, pub image_labeler_version: Option<String>, @@ -105,8 +108,10 @@ impl From<NodeConfig> for SanitisedNodeConfig { Self { id: value.id, name: value.name, - p2p_enabled: value.p2p.enabled, - p2p_port: value.p2p.port, + identity: value.identity.to_remote_identity(), + p2p_ipv4_port: value.p2p_ipv4_port, + p2p_ipv6_port: value.p2p_ipv6_port, + p2p_discovery: value.p2p_discovery, features: value.features, preferences: value.preferences, image_labeler_version: value.image_labeler_version, @@ -119,7 +124,7 @@ struct NodeState { #[serde(flatten)] config: SanitisedNodeConfig, data_path: String, - p2p: P2PStatus, + listeners: Vec<Listener2>, device_model: Option<String>, } @@ -155,7 +160,7 @@ pub(crate) fn mount() -> Arc { .to_str() .expect("Found non-UTF-8 path") .to_string(), - p2p: node.p2p.manager.status(), + listeners: into_listener2(&node.p2p.p2p.listeners()), device_model: Some(device_model), }) }) diff --git a/core/src/api/nodes.rs b/core/src/api/nodes.rs index 3b69306d0..92198323d 100644 --- a/core/src/api/nodes.rs +++ b/core/src/api/nodes.rs @@ -1,4 +1,7 @@ -use crate::{invalidate_query, util::MaybeUndefined}; +use crate::{ + invalidate_query, + node::config::{P2PDiscoveryState, Port}, +}; use sd_prisma::prisma::{instance, location}; @@ -16,8 +19,9 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> { #[derive(Deserialize, Type)] pub struct ChangeNodeNameArgs { pub name: Option<String>, - pub p2p_port: MaybeUndefined<u16>, - pub p2p_enabled: Option<bool>, + pub p2p_ipv4_port: Option<Port>, + pub p2p_ipv6_port: Option<Port>, + pub p2p_discovery: Option<P2PDiscoveryState>, pub image_labeler_version: Option<String>, } R.mutation(|node, args: ChangeNodeNameArgs| async move { @@ -30,9 +34,6 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> { } } - let does_p2p_need_refresh = - args.p2p_enabled.is_some() || args.p2p_port.is_defined(); - #[cfg(feature = "ai")] let mut new_model = None; @@ -42,11 +43,15 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> { config.name = name; } - config.p2p.enabled = args.p2p_enabled.unwrap_or(config.p2p.enabled); - - if let Some(v) = args.p2p_port.into() { - config.p2p.port = v; - } + if let Some(port) = args.p2p_ipv4_port { + config.p2p_ipv4_port = port; + }; + if let Some(port) = args.p2p_ipv6_port { + config.p2p_ipv6_port = port; + }; + if let Some(v) = args.p2p_discovery { + config.p2p_discovery = v; + }; #[cfg(feature = "ai")] if let Some(version) = args.image_labeler_version { @@ -59,9 +64,9 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> { new_model = sd_ai::image_labeler::YoloV8::model(Some(&version)) .map_err(|e| { error!( - "Failed to crate image_detection model: '{}'; Error: {e:#?}", - &version, - ); + "Failed to crate image_detection model: '{}'; Error: {e:#?}", + &version, + ); }) .ok(); if new_model.is_some() { @@ -79,13 +84,8 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> { ) })?; - // If a P2P config was modified reload it - if does_p2p_need_refresh { - node.p2p - .manager - .update_config(node.config.get().await.p2p.clone()) - .await; - } + // This is a no-op if the config didn't change + node.p2p.on_node_config_change().await; invalidate_query!(node; node, "nodeState"); diff --git a/core/src/api/p2p.rs b/core/src/api/p2p.rs index ad994fc3e..b464fa57b 100644 --- a/core/src/api/p2p.rs +++ b/core/src/api/p2p.rs @@ -1,11 +1,12 @@ -use crate::p2p::{operations, P2PEvent}; +use crate::p2p::{operations, 
Header, P2PEvent, PeerMetadata}; -use sd_p2p::spacetunnel::RemoteIdentity; +use sd_p2p2::RemoteIdentity; use rspc::{alpha::AlphaRouter, ErrorCode}; use serde::Deserialize; use specta::Type; use std::path::PathBuf; +use tokio::io::AsyncWriteExt; use uuid::Uuid; use super::{Ctx, R}; @@ -14,26 +15,21 @@ pub(crate) fn mount() -> AlphaRouter { R.router() .procedure("events", { R.subscription(|node, _: ()| async move { - let mut rx = node.p2p.subscribe(); + let mut rx = node.p2p.events.subscribe(); let mut queued = Vec::new(); - // TODO: Don't block subscription start - for peer in node.p2p.node.get_discovered() { - queued.push(P2PEvent::DiscoveredPeer { - identity: peer.identity, - metadata: peer.metadata, - }); - } - - // TODO: Don't block subscription start - for identity in node.p2p.manager.get_connected_peers().await.map_err(|_| { - rspc::Error::new( - ErrorCode::InternalServerError, - "todo: error getting connected peers".into(), - ) - })? { - queued.push(P2PEvent::ConnectedPeer { identity }); + for (identity, peer, metadata) in + node.p2p.p2p.peers().iter().filter_map(|(i, p)| { + PeerMetadata::from_hashmap(&p.metadata()) + .ok() + .map(|m| (i, p, m)) + }) { + let identity = *identity; + match peer.is_connected() { + true => queued.push(P2PEvent::ConnectedPeer { identity }), + false => queued.push(P2PEvent::DiscoveredPeer { identity, metadata }), + } } Ok(async_stream::stream! { @@ -48,10 +44,36 @@ pub(crate) fn mount() -> AlphaRouter { }) }) .procedure("state", { - R.query(|node, _: ()| async move { - // TODO: This has a potentially invalid map key and Specta don't like that. - // TODO: This will bypass that check and for an debug route that's fine. - Ok(serde_json::to_value(node.p2p.state()).unwrap()) + R.query(|node, _: ()| async move { Ok(node.p2p.state().await) }) + }) + .procedure("debugConnect", { + R.mutation(|node, identity: RemoteIdentity| async move { + let peer = { node.p2p.p2p.peers().get(&identity).cloned() }; + let mut stream = peer + .ok_or(rspc::Error::new( + ErrorCode::InternalServerError, + "big man, offline".into(), + ))? 
+ .new_stream() + .await + .map_err(|err| { + rspc::Error::new( + ErrorCode::InternalServerError, + format!("error in peer.new_stream: {:?}", err), + ) + })?; + + stream + .write_all(&Header::Ping.to_bytes()) + .await + .map_err(|err| { + rspc::Error::new( + ErrorCode::InternalServerError, + format!("error sending ping header: {:?}", err), + ) + })?; + + Ok("connected") }) }) .procedure("spacedrop", { diff --git a/core/src/cloud/sync/receive.rs b/core/src/cloud/sync/receive.rs index 5a7439c17..7ab1c16f4 100644 --- a/core/src/cloud/sync/receive.rs +++ b/core/src/cloud/sync/receive.rs @@ -3,7 +3,7 @@ use crate::library::{Libraries, Library}; use super::{err_break, err_return, CompressedCRDTOperations}; use sd_cloud_api::RequestConfigProvider; use sd_core_sync::NTP64; -use sd_p2p::spacetunnel::{IdentityOrRemoteIdentity, RemoteIdentity}; +use sd_p2p2::{IdentityOrRemoteIdentity, RemoteIdentity}; use sd_prisma::prisma::{cloud_crdt_operation, instance, PrismaClient, SortOrder}; use sd_sync::CRDTOperation; use sd_utils::uuid_to_bytes; diff --git a/core/src/custom_uri/mod.rs b/core/src/custom_uri/mod.rs index 481980af3..19ef0607b 100644 --- a/core/src/custom_uri/mod.rs +++ b/core/src/custom_uri/mod.rs @@ -9,10 +9,8 @@ use crate::{ use sd_file_ext::text::is_text; use sd_file_path_helper::{file_path_to_handle_custom_uri, IsolatedFilePathData}; -use sd_p2p::{ - spaceblock::Range, - spacetunnel::{IdentityOrRemoteIdentity, RemoteIdentity}, -}; +use sd_p2p2::{IdentityOrRemoteIdentity, RemoteIdentity}; +use sd_p2p_block::Range; use sd_prisma::prisma::{file_path, location}; use sd_utils::db::maybe_missing; @@ -243,45 +241,43 @@ pub fn router(node: Arc) -> Router<()> { } // TODO: Support `Range` requests and `ETag` headers - match state.node.p2p.get_library_service(&library.id) { - Some(service) => { - let stream = service - .connect(state.node.p2p.manager.clone(), &identity) - .await - .map_err(|err| { - not_found(format!( - "Error connecting to {identity}: {err:?}" - )) - })?; + let stream = state + .node + .p2p + .get_instance(&library.id, identity) + .ok_or_else(|| { + not_found(format!("Error connecting to {identity}: no connection method available")) + })? + .new_stream() + .await + .map_err(|err| { + not_found(format!("Error connecting to {identity}: {err:?}")) + })?; - let (tx, mut rx) = - tokio::sync::mpsc::channel::>(150); - // TODO: We only start a thread because of stupid `ManagerStreamAction2` and libp2p's `!Send/!Sync` bounds on a stream. - tokio::spawn(async move { - let Ok(()) = operations::request_file( - stream, - &library, - file_path_pub_id, - Range::Full, - MpscToAsyncWrite::new(PollSender::new(tx)), - ) - .await - else { - return; - }; - }); + let (tx, mut rx) = tokio::sync::mpsc::channel::>(150); + // TODO: We only start a thread because of stupid `ManagerStreamAction2` and libp2p's `!Send/!Sync` bounds on a stream. + tokio::spawn(async move { + let Ok(()) = operations::request_file( + stream, + &library, + file_path_pub_id, + Range::Full, + MpscToAsyncWrite::new(PollSender::new(tx)), + ) + .await + else { + return; + }; + }); - // TODO: Content Type - Ok(InfallibleResponse::builder().status(StatusCode::OK).body( - body::boxed(StreamBody::new(stream! { - while let Some(item) = rx.recv().await { - yield item; - } - })), - )) - } - None => Ok(not_found(())), - } + // TODO: Content Type + Ok(InfallibleResponse::builder().status(StatusCode::OK).body( + body::boxed(StreamBody::new(stream! 
{ + while let Some(item) = rx.recv().await { + yield item; + } + })), + )) } } }, diff --git a/core/src/lib.rs b/core/src/lib.rs index 561cb3303..0b263ba39 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -114,7 +114,9 @@ impl Node { let (jobs, jobs_actor) = job::Jobs::new(); let libraries = library::Libraries::new(data_dir.join("libraries")).await?; - let (p2p, p2p_actor) = p2p::P2PManager::new(config.clone(), libraries.clone()).await?; + let (p2p, start_p2p) = p2p::P2PManager::new(config.clone(), libraries.clone()) + .await + .map_err(NodeError::P2PManager)?; let node = Arc::new(Node { data_dir: data_dir.to_path_buf(), @@ -160,7 +162,7 @@ impl Node { locations_actor.start(node.clone()); node.libraries.init(&node).await?; jobs_actor.start(node.clone()); - p2p_actor.start(node.clone()); + start_p2p(node.clone()); let router = api::mount(); @@ -188,7 +190,7 @@ impl Node { std::env::set_var( "RUST_LOG", - format!("info,sd_core={level},sd_core::location::manager=info,sd_ai={level}"), + format!("info,sd_core={level},sd_p2p=debug,sd_core::location::manager=info,sd_ai={level}"), ); } @@ -325,7 +327,7 @@ pub enum NodeError { #[error("failed to initialize location manager: {0}")] LocationManager(#[from] LocationManagerError), #[error("failed to initialize p2p manager: {0}")] - P2PManager(#[from] sd_p2p::ManagerError), + P2PManager(String), #[error("invalid platform integer: {0}")] InvalidPlatformInt(u8), #[cfg(debug_assertions)] diff --git a/core/src/library/config.rs b/core/src/library/config.rs index 8c267495e..4505cd3c5 100644 --- a/core/src/library/config.rs +++ b/core/src/library/config.rs @@ -3,7 +3,7 @@ use crate::{ util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError}, }; -use sd_p2p::spacetunnel::{Identity, IdentityOrRemoteIdentity}; +use sd_p2p2::{Identity, IdentityOrRemoteIdentity}; use sd_prisma::prisma::{file_path, indexer_rule, instance, location, node, PrismaClient}; use sd_utils::{db::maybe_missing, error::FileIOError}; @@ -163,12 +163,7 @@ impl LibraryConfig { db.node() .update_many( vec![], - vec![ - node::pub_id::set(node_config.id.as_bytes().to_vec()), - node::node_peer_id::set(Some( - node_config.keypair.peer_id().to_string(), - )), - ], + vec![node::pub_id::set(node_config.id.as_bytes().to_vec())], ) .exec() .await?; diff --git a/core/src/library/library.rs b/core/src/library/library.rs index c474aee2a..7eb9c489f 100644 --- a/core/src/library/library.rs +++ b/core/src/library/library.rs @@ -1,7 +1,7 @@ use crate::{api::CoreEvent, object::media::thumbnail::get_indexed_thumbnail_path, sync, Node}; use sd_file_path_helper::{file_path_to_full_path, IsolatedFilePathData}; -use sd_p2p::spacetunnel::Identity; +use sd_p2p2::Identity; use sd_prisma::prisma::{file_path, location, PrismaClient}; use sd_utils::{db::maybe_missing, error::FileIOError}; @@ -67,6 +67,7 @@ impl Debug for Library { } impl Library { + #[allow(clippy::too_many_arguments)] pub async fn new( id: Uuid, config: LibraryConfig, diff --git a/core/src/library/manager/error.rs b/core/src/library/manager/error.rs index 3d00c5990..05775d244 100644 --- a/core/src/library/manager/error.rs +++ b/core/src/library/manager/error.rs @@ -3,7 +3,7 @@ use crate::{ location::{indexer, LocationManagerError}, }; -use sd_p2p::spacetunnel::IdentityOrRemoteIdentityErr; +use sd_p2p2::IdentityOrRemoteIdentityErr; use sd_utils::{ db::{self, MissingFieldError}, error::{FileIOError, NonUtf8PathError}, diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs index 8cd69ba5a..524579801 
100644 --- a/core/src/library/manager/mod.rs +++ b/core/src/library/manager/mod.rs @@ -1,20 +1,19 @@ use crate::{ api::{utils::InvalidateOperationEvent, CoreEvent}, - invalidate_query, + cloud, invalidate_query, location::{ indexer, metadata::{LocationMetadataError, SpacedriveLocationMetadataFile}, }, node::Platform, object::tag, - p2p::{self}, - sync, + p2p, sync, util::{mpscrr, MaybeUndefined}, Node, }; use sd_core_sync::SyncMessage; -use sd_p2p::spacetunnel::{Identity, IdentityOrRemoteIdentity}; +use sd_p2p2::{Identity, IdentityOrRemoteIdentity}; use sd_prisma::prisma::{crdt_operation, instance, location, SortOrder}; use sd_utils::{ db, @@ -535,7 +534,7 @@ impl Libraries { loop { debug!("Syncing library with cloud!"); - if let Some(_) = library.config().await.cloud_id { + if library.config().await.cloud_id.is_some() { if let Ok(lib) = sd_cloud_api::library::get(node.cloud_api_config().await, library.id) .await @@ -575,7 +574,7 @@ impl Libraries { } } - if &lib.name != &*library.config().await.name { + if lib.name != *library.config().await.name { warn!("Library name on cloud is outdated. Updating..."); if let Err(err) = sd_cloud_api::library::update( @@ -593,17 +592,16 @@ impl Libraries { } for instance in lib.instances { - if let Err(err) = - crate::cloud::sync::receive::create_instance( - &library, - &node.libraries, - instance.uuid, - instance.identity, - instance.node_id, - instance.node_name, - instance.node_platform, - ) - .await + if let Err(err) = cloud::sync::receive::create_instance( + &library, + &node.libraries, + instance.uuid, + instance.identity, + instance.node_id, + instance.node_name, + instance.node_platform, + ) + .await { error!( "Failed to create instance from cloud: {:#?}", diff --git a/core/src/node/config.rs b/core/src/node/config.rs index 379b90ab4..09d8340bd 100644 --- a/core/src/node/config.rs +++ b/core/src/node/config.rs @@ -4,7 +4,7 @@ use crate::{ util::version_manager::{Kind, ManagedVersion, VersionManager, VersionManagerError}, }; -use sd_p2p::{Keypair, ManagerConfig}; +use sd_p2p2::Identity; use sd_utils::error::FileIOError; use std::{ @@ -28,6 +28,29 @@ use uuid::Uuid; /// NODE_STATE_CONFIG_NAME is the name of the file which stores the NodeState pub const NODE_STATE_CONFIG_NAME: &str = "node_state.sdconfig"; +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Type)] +pub enum P2PDiscoveryState { + #[default] + Everyone, + ContactsOnly, + Disabled, +} + +#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize, Type)] +#[serde(rename_all = "snake_case", untagged)] +pub enum Port { + Disabled, + #[default] + Random, + Discrete(u16), +} + +impl Port { + pub fn is_random(&self) -> bool { + matches!(self, Port::Random) + } +} + /// NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk. #[derive(Debug, Clone, Serialize, Deserialize)] // If you are adding `specta::Type` on this your probably about to leak the P2P private key pub struct NodeConfig { @@ -40,10 +63,15 @@ pub struct NodeConfig { pub notifications: Vec, /// The p2p identity keypair for this node. This is used to identify the node on the network. /// This keypair does effectively nothing except for provide libp2p with a stable peer_id. 
- pub keypair: Keypair, + #[serde(with = "identity_serde")] + pub identity: Identity, /// P2P config + #[serde(default, skip_serializing_if = "Port::is_random")] + pub p2p_ipv4_port: Port, + #[serde(default, skip_serializing_if = "Port::is_random")] + pub p2p_ipv6_port: Port, #[serde(default)] - pub p2p: ManagerConfig, + pub p2p_discovery: P2PDiscoveryState, /// Feature flags enabled on the node #[serde(default)] pub features: Vec, @@ -60,6 +88,30 @@ pub struct NodeConfig { version: NodeConfigVersion, } +mod identity_serde { + use sd_p2p2::Identity; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize<S>(identity: &Identity, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + to_string(identity).serialize(serializer) + } + + pub fn deserialize<'de, D>(deserializer: D) -> Result<Identity, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Identity::from_bytes(&base91::slice_decode(s.as_bytes())).map_err(serde::de::Error::custom) + } + + pub fn to_string(identity: &Identity) -> String { + String::from_utf8_lossy(&base91::slice_encode(&identity.to_bytes())).to_string() + } +} + #[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq, Type)] pub struct NodePreferences { pub thumbnailer: ThumbnailerPreferences, @@ -73,10 +125,11 @@ pub enum NodeConfigVersion { V0 = 0, V1 = 1, V2 = 2, + V3 = 3, } impl ManagedVersion<NodeConfigVersion> for NodeConfig { - const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V2; + const LATEST_VERSION: NodeConfigVersion = NodeConfigVersion::V3; const KIND: Kind = Kind::Json("version"); type MigrationError = NodeConfigError; @@ -99,9 +152,11 @@ impl ManagedVersion<NodeConfigVersion> for NodeConfig { Some(Self { id: Uuid::new_v4(), name, - keypair: Keypair::generate(), + identity: Identity::default(), + p2p_ipv4_port: Port::Random, + p2p_ipv6_port: Port::Random, + p2p_discovery: P2PDiscoveryState::Everyone, version: Self::LATEST_VERSION, - p2p: ManagerConfig::default(), features: vec![], notifications: vec![], auth_token: None, @@ -173,6 +228,33 @@ impl NodeConfig { .map_err(|e| FileIOError::from((path, e)))?; } + (NodeConfigVersion::V2, NodeConfigVersion::V3) => { + let mut config: Map<String, Value> = + serde_json::from_slice(&fs::read(path).await.map_err(|e| { + FileIOError::from(( + path, + e, + "Failed to read node config file for migration", + )) + })?) + .map_err(VersionManagerError::SerdeJson)?; + + config.remove("keypair"); + config.remove("p2p"); + + config.insert( + String::from("identity"), + json!(identity_serde::to_string(&Default::default())), + ); + + let a = + serde_json::to_vec(&config).map_err(VersionManagerError::SerdeJson)?; + + fs::write(path, a) + .await + .map_err(|e| FileIOError::from((path, e)))?; + } + _ => { error!("Node config version is not handled: {:?}", current); return Err(VersionManagerError::UnexpectedMigration { diff --git a/core/src/p2p/connect_hook.rs b/core/src/p2p/connect_hook.rs new file mode 100644 index 000000000..f52812841 --- /dev/null +++ b/core/src/p2p/connect_hook.rs @@ -0,0 +1,26 @@ +// TODO: This is unused but will be used in the future. +// use std::sync::Arc; + +// use sd_p2p2::{flume::bounded, HookEvent, P2P}; + +// /// A P2P hook which listens for the availability of peers and connects with them.
+// pub struct ConnectHook {} + +// impl ConnectHook { +// pub fn spawn(p2p: Arc) -> Self { +// let (tx, rx) = bounded(15); +// let _ = p2p.register_hook("sd-connect-hook", tx); + +// tokio::spawn(async move { +// while let Ok(event) = rx.recv_async().await { +// match event { +// // TODO: Do the thing. For now we don't need this. +// HookEvent::Shutdown { _guard } => break, +// _ => continue, +// } +// } +// }); + +// Self {} +// } +// } diff --git a/core/src/p2p/events.rs b/core/src/p2p/events.rs new file mode 100644 index 000000000..30d71e757 --- /dev/null +++ b/core/src/p2p/events.rs @@ -0,0 +1,116 @@ +use std::sync::Arc; + +use sd_p2p2::{flume::bounded, HookEvent, RemoteIdentity, P2P}; +use serde::Serialize; +use specta::Type; +use tokio::sync::broadcast; +use uuid::Uuid; + +use super::PeerMetadata; + +/// TODO: P2P event for the frontend +#[derive(Debug, Clone, Serialize, Type)] +#[serde(tag = "type")] +pub enum P2PEvent { + DiscoveredPeer { + identity: RemoteIdentity, + metadata: PeerMetadata, + }, + ExpiredPeer { + identity: RemoteIdentity, + }, + ConnectedPeer { + identity: RemoteIdentity, + }, + DisconnectedPeer { + identity: RemoteIdentity, + }, + SpacedropRequest { + id: Uuid, + identity: RemoteIdentity, + peer_name: String, + files: Vec, + }, + SpacedropProgress { + id: Uuid, + percent: u8, + }, + SpacedropTimedout { + id: Uuid, + }, + SpacedropRejected { + id: Uuid, + }, +} + +/// A P2P hook which listens for events and sends them over a channel which can be connected to the frontend. +pub struct P2PEvents { + events: (broadcast::Sender, broadcast::Receiver), +} + +impl P2PEvents { + pub fn spawn(p2p: Arc) -> Self { + let events = broadcast::channel(15); + let (tx, rx) = bounded(15); + let _ = p2p.register_hook("sd-frontend-events", tx); + + let events_tx = events.0.clone(); + tokio::spawn(async move { + while let Ok(event) = rx.recv_async().await { + let event = match event { + // We use `HookEvent::PeerUnavailable`/`HookEvent::PeerAvailable` over `HookEvent::PeerExpiredBy`/`HookEvent::PeerDiscoveredBy` so that having an active connection is treated as "discovered". 
+ // It's possible to have an active connection without mDNS data (which is what Peer*By` are for) + HookEvent::PeerAvailable(peer) => { + let metadata = match PeerMetadata::from_hashmap(&peer.metadata()) { + Ok(metadata) => metadata, + Err(e) => { + println!( + "Invalid metadata for peer '{}': {:?}", + peer.identity(), + e + ); + continue; + } + }; + + P2PEvent::DiscoveredPeer { + identity: peer.identity(), + metadata, + } + } + HookEvent::PeerUnavailable(identity) => P2PEvent::ExpiredPeer { identity }, + HookEvent::PeerConnectedWith(_, peer) => P2PEvent::ConnectedPeer { + identity: peer.identity(), + }, + HookEvent::PeerDisconnectedWith(_, identity) => { + let peers = p2p.peers(); + let Some(peer) = peers.get(&identity) else { + continue; + }; + + if !peer.is_connected() { + P2PEvent::DisconnectedPeer { identity } + } else { + continue; + } + } + HookEvent::Shutdown { _guard } => break, + _ => continue, + }; + + let _ = events_tx.send(event); + } + }); + + Self { events } + } + + pub fn subscribe(&self) -> broadcast::Receiver { + self.events.0.subscribe() + } + + #[allow(clippy::result_large_err)] + pub fn send(&self, event: P2PEvent) -> Result> { + self.events.0.send(event) + } +} diff --git a/core/src/p2p/libraries.rs b/core/src/p2p/libraries.rs index 7df5a94f4..77969e7d9 100644 --- a/core/src/p2p/libraries.rs +++ b/core/src/p2p/libraries.rs @@ -1,70 +1,31 @@ -#![allow(unused)] // TODO: Remove this +use std::sync::Arc; -use crate::library::{Libraries, Library, LibraryManagerEvent}; +use sd_p2p2::P2P; +use tracing::error; -use sd_p2p::{spacetunnel::IdentityOrRemoteIdentity, Service}; +use crate::library::{Libraries, LibraryManagerEvent}; -use std::{ - collections::HashMap, - fmt, - sync::{Arc, PoisonError, RwLock}, -}; - -use tokio::sync::mpsc; -use tracing::{error, warn}; -use uuid::Uuid; - -use super::{LibraryMetadata, P2PManager}; - -pub struct LibraryServices { - services: RwLock>>>, - register_service_tx: mpsc::Sender>>, -} - -impl fmt::Debug for LibraryServices { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("LibraryServices") - .field( - "services", - &self - .services - .read() - .unwrap_or_else(PoisonError::into_inner) - .keys(), - ) - .finish() - } -} - -impl LibraryServices { - pub fn new(register_service_tx: mpsc::Sender>>) -> Self { - Self { - services: Default::default(), - register_service_tx, - } - } - - pub(crate) async fn start(manager: Arc, libraries: Arc) { +pub fn start(p2p: Arc, libraries: Arc) { + tokio::spawn(async move { if let Err(err) = libraries .rx .clone() .subscribe(|msg| { - let manager = manager.clone(); + let p2p = p2p.clone(); async move { match msg { LibraryManagerEvent::InstancesModified(library) | LibraryManagerEvent::Load(library) => { - manager - .clone() - .libraries - .load_library(manager, &library) - .await + p2p.metadata_mut().insert( + library.id.to_string(), + library.identity.to_remote_identity().to_string(), + ); } - LibraryManagerEvent::Edit(library) => { - manager.libraries.edit_library(&library).await + LibraryManagerEvent::Edit(_library) => { + // TODO: Send changes to all connected nodes or queue sending for when they are online! } LibraryManagerEvent::Delete(library) => { - manager.libraries.delete_library(&library).await + p2p.metadata_mut().remove(&library.id.to_string()); } } } @@ -73,87 +34,5 @@ impl LibraryServices { { error!("Core may become unstable! 
`LibraryServices::start` manager aborted with error: {err:?}"); } - } - - pub fn get(&self, id: &Uuid) -> Option>> { - self.services - .read() - .unwrap_or_else(PoisonError::into_inner) - .get(id) - .cloned() - } - - pub fn libraries(&self) -> Vec<(Uuid, Arc>)> { - self.services - .read() - .unwrap_or_else(PoisonError::into_inner) - .iter() - .map(|(k, v)| (*k, v.clone())) - .collect::>() - } - - pub(crate) async fn load_library(&self, manager: Arc, library: &Library) { - let identities = match library.db.instance().find_many(vec![]).exec().await { - Ok(library) => library - .into_iter() - .filter_map( - // TODO: Error handling - |i| match IdentityOrRemoteIdentity::from_bytes(&i.identity) { - Err(err) => { - warn!("error parsing identity: {err:?}"); - None - } - Ok(IdentityOrRemoteIdentity::Identity(_)) => None, - Ok(IdentityOrRemoteIdentity::RemoteIdentity(identity)) => Some(identity), - }, - ) - .collect(), - Err(err) => { - warn!("error loading library '{}': {err:?}", library.id); - return; - } - }; - - let mut inserted = false; - - let service = { - let mut service = self - .services - .write() - .unwrap_or_else(PoisonError::into_inner); - let service = service.entry(library.id).or_insert_with(|| { - inserted = true; - Arc::new( - Service::new( - String::from_utf8_lossy(&base91::slice_encode(library.id.as_bytes())), - manager.manager.clone(), - ) - .expect("error creating service with duplicate service name"), - ) - }); - service.add_known(identities); - service.clone() - }; - - if inserted { - service.update(LibraryMetadata {}); - if self.register_service_tx.send(service).await.is_err() { - warn!("error sending on 'register_service_tx'. This indicates a bug!"); - } - } - } - - pub(crate) async fn edit_library(&self, _library: &Library) { - // TODO: Send changes to all connected nodes! 
- // TODO: Update mdns - } - - pub(crate) async fn delete_library(&self, library: &Library) { - drop( - self.services - .write() - .unwrap_or_else(PoisonError::into_inner) - .remove(&library.id), - ); - } + }); } diff --git a/core/src/p2p/library_metadata.rs b/core/src/p2p/library_metadata.rs deleted file mode 100644 index 0a6e2b198..000000000 --- a/core/src/p2p/library_metadata.rs +++ /dev/null @@ -1,22 +0,0 @@ -use sd_p2p::Metadata; - -use std::collections::HashMap; - -use serde::{Deserialize, Serialize}; -use specta::Type; - -#[derive(Debug, Clone, Type, Serialize, Deserialize)] -pub struct LibraryMetadata {} - -impl Metadata for LibraryMetadata { - fn to_hashmap(self) -> HashMap { - HashMap::with_capacity(0) - } - - fn from_hashmap(_: &HashMap) -> Result - where - Self: Sized, - { - Ok(Self {}) - } -} diff --git a/core/src/p2p/manager.rs b/core/src/p2p/manager.rs new file mode 100644 index 000000000..0e4daeae0 --- /dev/null +++ b/core/src/p2p/manager.rs @@ -0,0 +1,311 @@ +use crate::{ + node::{ + config::{self, P2PDiscoveryState, Port}, + get_hardware_model_name, HardwareModel, + }, + p2p::{libraries, operations, sync::SyncMessage, Header, OperatingSystem, SPACEDRIVE_APP_ID}, + Node, +}; + +use sd_p2p2::{ + flume::{bounded, Receiver}, + Libp2pPeerId, Listener, Mdns, Peer, QuicTransport, RemoteIdentity, UnicastStream, P2P, +}; +use sd_p2p_tunnel::Tunnel; +use serde::Serialize; +use serde_json::json; +use specta::Type; +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::{atomic::AtomicBool, Arc, Mutex, PoisonError}, +}; + +use tokio::sync::oneshot; +use tracing::{error, info}; +use uuid::Uuid; + +use super::{P2PEvents, PeerMetadata}; + +pub struct P2PManager { + pub(crate) p2p: Arc, + mdns: Mutex>, + quic: QuicTransport, + // The `libp2p::PeerId`. This is for debugging only, use `RemoteIdentity` instead. 
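The new `P2PEvents` hook shown earlier replaces the old broadcast pair on `P2PManager`: callers now go through `subscribe()`/`send()` instead of reaching into `events.0`. Below is a hedged sketch of how a consumer (for example the rspc event bridge) might drain that channel. Only `subscribe()` and the `P2PEvent` variants come from the diff; `forward_events` and the logging are illustrative, and the snippet assumes it lives inside `sd-core` next to the new `p2p` module.

```rust
// Sketch only: assumes it lives inside sd-core next to the new `p2p` module.
// Only `P2PEvents::subscribe()` and the `P2PEvent` variants come from the diff;
// `forward_events` and the logging are illustrative.
use crate::p2p::{P2PEvent, P2PEvents};
use tokio::sync::broadcast::error::RecvError;

async fn forward_events(events: &P2PEvents) {
    let mut rx = events.subscribe();
    loop {
        match rx.recv().await {
            Ok(P2PEvent::DiscoveredPeer { identity, metadata }) => {
                println!("discovered peer '{identity}' ({})", metadata.name);
            }
            Ok(event) => println!("p2p event: {event:?}"),
            // The broadcast channel drops old events if this consumer lags behind.
            Err(RecvError::Lagged(skipped)) => {
                eprintln!("lagged, skipped {skipped} events");
            }
            Err(RecvError::Closed) => break,
        }
    }
}
```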
+ lp2p_peer_id: Libp2pPeerId, + pub(crate) events: P2PEvents, + // connect_hook: ConnectHook, + pub(super) spacedrop_pairing_reqs: Arc>>>>, + pub(super) spacedrop_cancelations: Arc>>>, + pub(crate) node_config: Arc, +} + +impl P2PManager { + pub async fn new( + node_config: Arc, + libraries: Arc, + ) -> Result<(Arc, impl FnOnce(Arc)), String> { + let (tx, rx) = bounded(25); + let p2p = P2P::new(SPACEDRIVE_APP_ID, node_config.get().await.identity, tx); + let (quic, lp2p_peer_id) = QuicTransport::spawn(p2p.clone())?; + let this = Arc::new(Self { + p2p: p2p.clone(), + lp2p_peer_id, + mdns: Mutex::new(None), + quic, + events: P2PEvents::spawn(p2p.clone()), + // connect_hook: ConnectHook::spawn(p2p), + spacedrop_pairing_reqs: Default::default(), + spacedrop_cancelations: Default::default(), + node_config, + }); + this.on_node_config_change().await; + + libraries::start(this.p2p.clone(), libraries); + + info!( + "Node RemoteIdentity('{}') libp2p::PeerId('{:?}') is now online listening at addresses: {:?}", + this.p2p.remote_identity(), + this.lp2p_peer_id, + this.p2p.listeners() + ); + + Ok((this.clone(), |node| { + tokio::spawn(start(this, node, rx)); + })) + } + + // TODO: Remove this and add a subscription system to `config::Manager` + pub async fn on_node_config_change(&self) { + let config = self.node_config.get().await; + + PeerMetadata { + name: config.name.clone(), + operating_system: Some(OperatingSystem::get_os()), + device_model: Some(get_hardware_model_name().unwrap_or(HardwareModel::Other)), + version: Some(env!("CARGO_PKG_VERSION").to_string()), + } + .update(&mut self.p2p.metadata_mut()); + + let port = match config.p2p_ipv4_port { + Port::Disabled => None, + Port::Random => Some(0), + Port::Discrete(port) => Some(port), + }; + info!("Setting quic ipv4 listener to: {port:?}"); + if let Err(err) = self.quic.set_ipv4_enabled(port).await { + error!("Failed to enabled quic ipv4 listener: {err}"); + self.node_config + .write(|c| c.p2p_ipv4_port = Port::Disabled) + .await + .ok(); + } + + let port = match config.p2p_ipv6_port { + Port::Disabled => None, + Port::Random => Some(0), + Port::Discrete(port) => Some(port), + }; + info!("Setting quic ipv4 listener to: {port:?}"); + if let Err(err) = self.quic.set_ipv6_enabled(port).await { + error!("Failed to enabled quic ipv6 listener: {err}"); + self.node_config + .write(|c| c.p2p_ipv6_port = Port::Disabled) + .await + .ok(); + } + + let should_revert = match config.p2p_discovery { + P2PDiscoveryState::Everyone + // TODO: Make `ContactsOnly` work + | P2PDiscoveryState::ContactsOnly => { + let mut mdns = self.mdns.lock().unwrap_or_else(PoisonError::into_inner); + if mdns.is_none() { + match Mdns::spawn(self.p2p.clone()) { + Ok(m) => { + info!("mDNS started successfully."); + *mdns = Some(m); + false + } + Err(err) => { + error!("Failed to start mDNS: {err}"); + true + } + } + } else { + false + } + } + P2PDiscoveryState::Disabled => { + let mdns = { + let mut mdns = self.mdns.lock().unwrap_or_else(PoisonError::into_inner); + mdns.take() + }; + if let Some(mdns) = mdns { + mdns.shutdown().await; + info!("mDNS shutdown successfully."); + } + + false + }, + }; + + // The `should_revert` bit is weird but we need this future to stay `Send` as rspc requires. + // To make it send we have to drop `quic` (a `!Send` `MutexGuard`). + // Doing it within the above scope seems to not work (even when manually calling `drop`). 
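The comment above points at a real Rust constraint: a `std::sync::MutexGuard` is `!Send`, so holding it across an `.await` would make the whole future `!Send`, which rspc rejects. The `should_revert` flag lets the guard be dropped before any await point. Here is a self-contained illustration of the same pattern; `toggle` is made up for the example and only demonstrates the scoping trick (it needs tokio, which the core already depends on).

```rust
// Self-contained illustration of the `should_revert` trick above: compute a
// flag inside a block so the (!Send) std::sync::MutexGuard is dropped before
// any `.await`, keeping the async fn's future `Send`.
use std::sync::Mutex;

async fn toggle(discovery_enabled: &Mutex<bool>) {
    let should_revert = {
        let mut guard = discovery_enabled.lock().expect("not poisoned");
        if *guard {
            // Pretend enabling discovery failed, so the setting must be reverted.
            *guard = false;
            true
        } else {
            false
        }
    }; // guard is dropped here, before the await point below

    if should_revert {
        // No guard is held across this await, so the future stays `Send`.
        tokio::task::yield_now().await;
    }
}

fn assert_send<T: Send>(_: T) {}

fn main() {
    let enabled = Mutex::new(true);
    // Type-level check only; nothing is polled here.
    assert_send(toggle(&enabled));
}
```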
+ if should_revert { + let _ = self + .node_config + .write(|c| c.p2p_discovery = P2PDiscoveryState::Disabled) + .await; + } + } + + pub fn get_library_instances(&self, library: &Uuid) -> Vec<(RemoteIdentity, Arc)> { + let library_id = library.to_string(); + self.p2p + .peers() + .iter() + .filter(|(_, p)| p.metadata().contains_key(&library_id)) + .map(|(i, p)| (*i, p.clone())) + .collect() + } + + pub fn get_instance(&self, library: &Uuid, identity: RemoteIdentity) -> Option> { + let library_id = library.to_string(); + self.p2p + .peers() + .iter() + .find(|(i, p)| **i == identity && p.metadata().contains_key(&library_id)) + .map(|(_, p)| p.clone()) + } + + pub async fn state(&self) -> serde_json::Value { + let listeners = self.p2p.listeners(); + let node_config = self.node_config.get().await; + json!({ + "self_identity": self.p2p.remote_identity().to_string(), + "self_peer_id": format!("{:?}", self.lp2p_peer_id), + "metadata": self.p2p.metadata().clone(), + "peers": self.p2p.peers().iter().map(|(identity, p)| json!({ + "identity": identity.to_string(), + "metadata": p.metadata().clone(), + "can_connect": p.can_connect(), + "is_connected": p.is_connected(), + "active_connections": p.active_connections(), + "connection_methods": p.connection_methods().iter().map(|id| format!("{:?}", id)).collect::>(), + "discovered_by": p.discovered_by().iter().map(|id| format!("{:?}", id)).collect::>(), + })).collect::>(), + "hooks": self.p2p.hooks().iter().map(|(id, name)| json!({ + "id": format!("{:?}", id), + "name": name, + "listener_addrs": listeners.iter().find(|l| l.is_hook_id(*id)).map(|l| l.addrs.clone()), + })).collect::>(), + "config": json!({ + "p2p_ipv4_port": node_config.p2p_ipv4_port, + "p2p_ipv6_port": node_config.p2p_ipv6_port, + "p2p_discovery": node_config.p2p_discovery, + }) + + }) + } + + pub async fn shutdown(&self) { + // `self.p2p` will automatically take care of shutting down all the hooks. Eg. `self.quic`, `self.mdns`, etc. + self.p2p.shutdown().await; + } +} + +async fn start( + this: Arc, + node: Arc, + rx: Receiver, +) -> Result<(), ()> { + while let Ok(mut stream) = rx.recv_async().await { + let this = this.clone(); + let node = node.clone(); + tokio::spawn(async move { + println!("APPLICATION GOT STREAM: {:?}", stream); // TODO + + let Ok(header) = Header::from_stream(&mut stream).await.map_err(|err| { + error!("Failed to read header from stream: {}", err); + }) else { + return; + }; + + match header { + Header::Ping => operations::ping::reciever(stream).await, + Header::Spacedrop(req) => { + let Err(()) = operations::spacedrop::reciever(&this, req, stream).await else { + return; + }; + + error!("Failed to handle Spacedrop request"); + } + Header::Sync(library_id) => { + let Ok(mut tunnel) = Tunnel::responder(stream).await.map_err(|err| { + error!("Failed `Tunnel::responder`: {}", err); + }) else { + return; + }; + + let Ok(msg) = SyncMessage::from_stream(&mut tunnel).await.map_err(|err| { + error!("Failed `SyncMessage::from_stream`: {}", err); + }) else { + return; + }; + + let Ok(library) = + node.libraries + .get_library(&library_id) + .await + .ok_or_else(|| { + error!("Failed to get library '{library_id}'"); + + // TODO: Respond to remote client with warning! 
+ }) + else { + return; + }; + + match msg { + SyncMessage::NewOperations => { + let Err(()) = super::sync::responder(&mut tunnel, library).await else { + return; + }; + + error!("Failed to handle sync responder request"); + } + }; + } + Header::File(req) => { + let Err(()) = operations::request_file::receiver(&node, req, stream).await + else { + return; + }; + + error!("Failed to handle file request"); + } + }; + }); + } + + Ok::<_, ()>(()) +} + +#[derive(Debug, Serialize, Type)] +pub struct Listener2 { + pub id: String, + pub name: &'static str, + pub addrs: HashSet, +} + +pub fn into_listener2(l: &[Listener]) -> Vec { + l.iter() + .map(|l| Listener2 { + id: format!("{:?}", l.id), + name: l.name, + addrs: l.addrs.clone(), + }) + .collect() +} diff --git a/core/src/p2p/peer_metadata.rs b/core/src/p2p/metadata.rs similarity index 91% rename from core/src/p2p/peer_metadata.rs rename to core/src/p2p/metadata.rs index 10c397ee2..47d93dbd6 100644 --- a/core/src/p2p/peer_metadata.rs +++ b/core/src/p2p/metadata.rs @@ -1,7 +1,5 @@ use crate::node::{HardwareModel, Platform}; -use sd_p2p::Metadata; - use std::{collections::HashMap, env, str::FromStr}; use serde::{Deserialize, Serialize}; @@ -15,10 +13,9 @@ pub struct PeerMetadata { pub version: Option, } -impl Metadata for PeerMetadata { - fn to_hashmap(self) -> HashMap { - let mut map = HashMap::with_capacity(5); - map.insert("name".to_owned(), self.name); +impl PeerMetadata { + pub fn update(self, map: &mut HashMap) { + map.insert("name".to_owned(), self.name.clone()); if let Some(os) = self.operating_system { map.insert("os".to_owned(), os.to_string()); } @@ -28,13 +25,9 @@ impl Metadata for PeerMetadata { if let Some(device_model) = self.device_model { map.insert("device_model".to_owned(), device_model.to_string()); } - map } - fn from_hashmap(data: &HashMap) -> Result - where - Self: Sized, - { + pub fn from_hashmap(data: &HashMap) -> Result { Ok(Self { name: data .get("name") diff --git a/core/src/p2p/mod.rs b/core/src/p2p/mod.rs index b1ad6eec3..c958fae5c 100644 --- a/core/src/p2p/mod.rs +++ b/core/src/p2p/mod.rs @@ -1,22 +1,19 @@ #![warn(clippy::all, clippy::unwrap_used, clippy::panic)] #![allow(clippy::unnecessary_cast)] // Yeah they aren't necessary on this arch, but they are on others -mod libraries; -mod library_metadata; +mod connect_hook; +mod events; +pub(super) mod libraries; +mod manager; +mod metadata; pub mod operations; -mod p2p_events; -mod p2p_manager; -mod p2p_manager_actor; -mod peer_metadata; mod protocol; pub mod sync; -pub use libraries::*; -pub use library_metadata::*; -pub use p2p_events::*; -pub use p2p_manager::*; -pub use p2p_manager_actor::*; -pub use peer_metadata::*; +// pub use connect_hook::*; +pub use events::*; +pub use manager::*; +pub use metadata::*; pub use protocol::*; pub(super) const SPACEDRIVE_APP_ID: &str = "sd"; diff --git a/core/src/p2p/operations/ping.rs b/core/src/p2p/operations/ping.rs index 58a2936c6..70701cf1b 100644 --- a/core/src/p2p/operations/ping.rs +++ b/core/src/p2p/operations/ping.rs @@ -1,17 +1,12 @@ -use crate::p2p::P2PManager; - -use sd_p2p::PeerMessageEvent; - -use std::sync::Arc; - +use sd_p2p2::UnicastStream; use tracing::debug; /// Send a ping to all peers we are connected to #[allow(unused)] -pub async fn ping(_p2p: Arc) { +pub async fn ping() { todo!(); } -pub(crate) async fn reciever(event: PeerMessageEvent) { - debug!("Received ping from peer '{}'", event.identity); +pub(crate) async fn reciever(stream: UnicastStream) { + debug!("Received ping from peer '{}'", 
stream.remote_identity()); } diff --git a/core/src/p2p/operations/request_file.rs b/core/src/p2p/operations/request_file.rs index 81546bc01..077d5f412 100644 --- a/core/src/p2p/operations/request_file.rs +++ b/core/src/p2p/operations/request_file.rs @@ -5,12 +5,15 @@ use crate::{ }; use sd_file_path_helper::{file_path_to_handle_p2p_serve_file, IsolatedFilePathData}; -use sd_p2p::{ - spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer}, - spacetime::UnicastStream, - PeerMessageEvent, -}; +use sd_p2p2::UnicastStream; +use sd_p2p_block::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer}; use sd_prisma::prisma::file_path; +use tokio::{ + fs::File, + io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader}, +}; +use tracing::{debug, warn}; +use uuid::Uuid; use std::{ path::Path, @@ -20,13 +23,6 @@ use std::{ }, }; -use tokio::{ - fs::File, - io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader}, -}; -use tracing::{debug, warn}; -use uuid::Uuid; - /// Request a file from the remote machine over P2P. This is used for preview media and quick preview. /// /// DO NOT USE THIS WITHOUT `node.files_over_p2p_flag == true` @@ -111,9 +107,8 @@ pub(crate) async fn receiver( file_path_id, range, }: HeaderFile, - event: PeerMessageEvent, + mut stream: UnicastStream, ) -> Result<(), ()> { - let mut stream = event.stream; #[allow(clippy::panic)] // If you've made it this far that's on you. if !node.files_over_p2p_flag.load(Ordering::Relaxed) { panic!("Files over P2P is disabled!"); diff --git a/core/src/p2p/operations/spacedrop.rs b/core/src/p2p/operations/spacedrop.rs index a3adaabb7..804ff2b5c 100644 --- a/core/src/p2p/operations/spacedrop.rs +++ b/core/src/p2p/operations/spacedrop.rs @@ -1,22 +1,17 @@ -use crate::p2p::{Header, P2PEvent, P2PManager}; - -use sd_p2p::{ - spaceblock::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer}, - spacetunnel::RemoteIdentity, - PeerMessageEvent, -}; - use std::{ borrow::Cow, path::PathBuf, sync::{ atomic::{AtomicBool, Ordering}, - Arc, + Arc, PoisonError, }, time::Duration, }; +use crate::p2p::{Header, P2PEvent, P2PManager}; use futures::future::join_all; +use sd_p2p2::{RemoteIdentity, UnicastStream}; +use sd_p2p_block::{BlockSize, Range, SpaceblockRequest, SpaceblockRequests, Transfer}; use tokio::{ fs::{create_dir_all, File}, io::{AsyncReadExt, AsyncWriteExt, BufReader, BufWriter}, @@ -32,7 +27,6 @@ pub(crate) const SPACEDROP_TIMEOUT: Duration = Duration::from_secs(60); // TODO: Proper error handling pub async fn spacedrop( p2p: Arc, - // TODO: Stop using `PeerId` identity: RemoteIdentity, paths: Vec, ) -> Result { @@ -61,7 +55,10 @@ pub async fn spacedrop( .await .into_iter() .collect::, std::io::Error>>() - .map_err(|_| ())? // TODO: Error handling + .map_err(|err| { + warn!("error opening file: '{err:?}'"); + // TODO: Proper error type + })? .into_iter() .unzip(); @@ -69,8 +66,18 @@ pub async fn spacedrop( let id = Uuid::new_v4(); debug!("({id}): starting Spacedrop with peer '{identity}"); - let mut stream = p2p.manager.stream(identity).await.map_err(|err| { - debug!("({id}): failed to connect: {err:?}"); + let peer = p2p + .p2p + .peers() + .get(&identity) + .ok_or_else(|| { + debug!("({id}): failed to find connection method with '{identity}'"); + // TODO: Proper error + })? 
+ .clone(); + + let mut stream = peer.new_stream().await.map_err(|err| { + debug!("({id}): failed to connect to '{identity}': {err:?}"); // TODO: Proper error })?; @@ -95,7 +102,7 @@ pub async fn spacedrop( // Add 5 seconds incase the user responded on the deadline and slow network _ = sleep(SPACEDROP_TIMEOUT + Duration::from_secs(5)) => { debug!("({id}): timed out, cancelling"); - p2p.events.0.send(P2PEvent::SpacedropTimedout { id }).ok(); + p2p.events.send(P2PEvent::SpacedropTimedout { id }).ok(); return; }, }; @@ -103,18 +110,18 @@ pub async fn spacedrop( match result { Ok(0) => { debug!("({id}): Spacedrop was rejected from peer '{identity}'"); - p2p.events.0.send(P2PEvent::SpacedropRejected { id }).ok(); + p2p.events.send(P2PEvent::SpacedropRejected { id }).ok(); return; } - Ok(1) => {} // Okay - Ok(_) => todo!(), // TODO: Proper error - Err(_) => todo!(), // TODO: Proper error + Ok(1) => {} // Okay + Ok(_) => todo!(), // TODO: Proper error + Err(err) => todo!("{:?}", err), // TODO: Proper error } let cancelled = Arc::new(AtomicBool::new(false)); p2p.spacedrop_cancelations .lock() - .await + .unwrap_or_else(PoisonError::into_inner) .insert(id, cancelled.clone()); debug!("({id}): starting transfer"); @@ -124,7 +131,6 @@ pub async fn spacedrop( &requests, |percent| { p2p.events - .0 .send(P2PEvent::SpacedropProgress { id, percent }) .ok(); }, @@ -138,7 +144,6 @@ pub async fn spacedrop( debug!("({id}): failed to send file '{file_id}': {err}"); // TODO: Error to frontend // p2p.events - // .0 // .send(P2PEvent::SpacedropFailed { id, file_id }) // .ok(); return; @@ -154,7 +159,12 @@ pub async fn spacedrop( // TODO: Move these off the manager impl P2PManager { pub async fn accept_spacedrop(&self, id: Uuid, path: String) { - if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) { + if let Some(chan) = self + .spacedrop_pairing_reqs + .lock() + .unwrap_or_else(PoisonError::into_inner) + .remove(&id) + { chan.send(Some(path)) .map_err(|err| { warn!("error accepting Spacedrop '{id:?}': '{err:?}'"); @@ -164,7 +174,12 @@ impl P2PManager { } pub async fn reject_spacedrop(&self, id: Uuid) { - if let Some(chan) = self.spacedrop_pairing_reqs.lock().await.remove(&id) { + if let Some(chan) = self + .spacedrop_pairing_reqs + .lock() + .unwrap_or_else(PoisonError::into_inner) + .remove(&id) + { chan.send(None) .map_err(|err| { warn!("error rejecting Spacedrop '{id:?}': '{err:?}'"); @@ -174,7 +189,12 @@ impl P2PManager { } pub async fn cancel_spacedrop(&self, id: Uuid) { - if let Some(cancelled) = self.spacedrop_cancelations.lock().await.remove(&id) { + if let Some(cancelled) = self + .spacedrop_cancelations + .lock() + .unwrap_or_else(PoisonError::into_inner) + .remove(&id) + { cancelled.store(true, Ordering::Relaxed); } } @@ -183,26 +203,27 @@ impl P2PManager { pub(crate) async fn reciever( this: &Arc, req: SpaceblockRequests, - event: PeerMessageEvent, + mut stream: UnicastStream, ) -> Result<(), ()> { let id = req.id; - let mut stream = event.stream; let (tx, rx) = oneshot::channel(); info!( "({id}): received '{}' files from peer '{}' with block size '{:?}'", req.requests.len(), - event.identity, + stream.remote_identity(), req.block_size ); - this.spacedrop_pairing_reqs.lock().await.insert(id, tx); + this.spacedrop_pairing_reqs + .lock() + .unwrap_or_else(PoisonError::into_inner) + .insert(id, tx); if this .events - .0 .send(P2PEvent::SpacedropRequest { id, - identity: event.identity, + identity: stream.remote_identity(), peer_name: "Unknown".into(), // TODO: A better solution to 
this // manager @@ -245,7 +266,7 @@ pub(crate) async fn reciever( let cancelled = Arc::new(AtomicBool::new(false)); this.spacedrop_cancelations .lock() - .await + .unwrap_or_else(PoisonError::into_inner) .insert(id, cancelled.clone()); stream.write_all(&[1]).await.map_err(|err| { @@ -258,7 +279,7 @@ pub(crate) async fn reciever( let names = req.requests.iter().map(|req| req.name.clone()).collect::>(); let mut transfer = Transfer::new(&req, |percent| { - this.events.0.send(P2PEvent::SpacedropProgress { id, percent }).ok(); + this.events.send(P2PEvent::SpacedropProgress { id, percent }).ok(); }, &cancelled); let file_path = PathBuf::from(file_path); diff --git a/core/src/p2p/p2p_events.rs b/core/src/p2p/p2p_events.rs deleted file mode 100644 index bbbed8da0..000000000 --- a/core/src/p2p/p2p_events.rs +++ /dev/null @@ -1,42 +0,0 @@ -use sd_p2p::spacetunnel::RemoteIdentity; - -use serde::Serialize; -use specta::Type; -use uuid::Uuid; - -use super::PeerMetadata; - -/// TODO: P2P event for the frontend -#[derive(Debug, Clone, Serialize, Type)] -#[serde(tag = "type")] -pub enum P2PEvent { - DiscoveredPeer { - identity: RemoteIdentity, - metadata: PeerMetadata, - }, - ExpiredPeer { - identity: RemoteIdentity, - }, - ConnectedPeer { - identity: RemoteIdentity, - }, - DisconnectedPeer { - identity: RemoteIdentity, - }, - SpacedropRequest { - id: Uuid, - identity: RemoteIdentity, - peer_name: String, - files: Vec, - }, - SpacedropProgress { - id: Uuid, - percent: u8, - }, - SpacedropTimedout { - id: Uuid, - }, - SpacedropRejected { - id: Uuid, - }, -} diff --git a/core/src/p2p/p2p_manager.rs b/core/src/p2p/p2p_manager.rs deleted file mode 100644 index bac0b2ee4..000000000 --- a/core/src/p2p/p2p_manager.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - node::{config, get_hardware_model_name, HardwareModel}, - p2p::{OperatingSystem, SPACEDRIVE_APP_ID}, -}; - -use sd_p2p::{ - spacetunnel::RemoteIdentity, Manager, ManagerConfig, ManagerError, PeerStatus, Service, -}; -use std::{ - collections::{HashMap, HashSet}, - net::SocketAddr, - sync::{atomic::AtomicBool, Arc}, -}; - -use serde::Serialize; -use specta::Type; -use tokio::sync::{broadcast, mpsc, oneshot, Mutex}; -use tracing::info; -use uuid::Uuid; - -use super::{LibraryMetadata, LibraryServices, P2PEvent, P2PManagerActor, PeerMetadata}; - -pub struct P2PManager { - pub(crate) node: Service, - pub(crate) libraries: LibraryServices, - - pub events: (broadcast::Sender, broadcast::Receiver), - pub manager: Arc, - pub(super) spacedrop_pairing_reqs: Arc>>>>, - pub(super) spacedrop_cancelations: Arc>>>, - node_config_manager: Arc, -} - -impl P2PManager { - pub async fn new( - node_config: Arc, - libraries: Arc, - ) -> Result<(Arc, P2PManagerActor), ManagerError> { - let (keypair, manager_config) = { - let config = node_config.get().await; - (config.keypair, config.p2p.clone()) - }; - - let (manager, stream) = - sd_p2p::Manager::new(SPACEDRIVE_APP_ID, &keypair, manager_config).await?; - - info!( - "Node RemoteIdentity('{}') libp2p::PeerId('{}') is now online listening at addresses: {:?}", - manager.identity(), - manager.libp2p_peer_id(), - stream.listen_addrs() - ); - - let (register_service_tx, register_service_rx) = mpsc::channel(10); - let this = Arc::new(Self { - node: Service::new("node", manager.clone()) - .expect("Hardcoded service name will never be a duplicate!"), - libraries: LibraryServices::new(register_service_tx), - events: broadcast::channel(100), - manager, - spacedrop_pairing_reqs: Default::default(), - spacedrop_cancelations: 
Default::default(), - node_config_manager: node_config, - }); - this.update_metadata().await; - - tokio::spawn(LibraryServices::start(this.clone(), libraries)); - - Ok(( - this.clone(), - P2PManagerActor { - manager: this, - stream, - register_service_rx, - }, - )) - } - - pub fn get_library_service(&self, library_id: &Uuid) -> Option>> { - self.libraries.get(library_id) - } - - pub async fn update_metadata(&self) { - self.node.update({ - let config = self.node_config_manager.get().await; - PeerMetadata { - name: config.name.clone(), - operating_system: Some(OperatingSystem::get_os()), - device_model: Some(get_hardware_model_name().unwrap_or(HardwareModel::Other)), - version: Some(env!("CARGO_PKG_VERSION").to_string()), - } - }); - } - - pub fn subscribe(&self) -> broadcast::Receiver { - self.events.0.subscribe() - } - - // TODO: Replace this with a better system that is more built into `sd-p2p` crate - pub fn state(&self) -> P2PState { - let ( - self_peer_id, - self_identity, - config, - manager_connected, - manager_connections, - dicovery_services, - discovery_discovered, - discovery_known, - ) = self.manager.get_debug_state(); - - P2PState { - node: self.node.get_state(), - libraries: self - .libraries - .libraries() - .into_iter() - .map(|(id, lib)| (id, lib.get_state())) - .collect(), - self_peer_id: PeerId(self_peer_id), - self_identity, - config, - manager_connected: manager_connected - .into_iter() - .map(|(k, v)| (PeerId(k), v)) - .collect(), - manager_connections: manager_connections.into_iter().map(PeerId).collect(), - dicovery_services, - discovery_discovered: discovery_discovered - .into_iter() - .map(|(k, v)| { - ( - k, - v.into_iter() - .map(|(k, (k1, v, b))| (k, (PeerId(k1), v, b))) - .collect(), - ) - }) - .collect(), - discovery_known, - } - } - - pub async fn shutdown(&self) { - self.manager.shutdown().await; - } -} - -#[derive(Debug, Serialize, Type)] -#[allow(clippy::type_complexity)] -pub struct P2PState { - node: HashMap, - libraries: Vec<(Uuid, HashMap)>, - self_peer_id: PeerId, - self_identity: RemoteIdentity, - config: ManagerConfig, - manager_connected: HashMap, - manager_connections: HashSet, - dicovery_services: HashMap>>, - discovery_discovered: HashMap< - String, - HashMap, Vec)>, - >, - discovery_known: HashMap>, -} - -// TODO: Get this back into `sd-p2p` but keep it private -#[derive(Debug, Serialize, Type, Hash, Eq, PartialEq, Ord, PartialOrd, Clone)] -pub struct PeerId(#[specta(type = String)] sd_p2p::internal::PeerId); diff --git a/core/src/p2p/p2p_manager_actor.rs b/core/src/p2p/p2p_manager_actor.rs deleted file mode 100644 index b7bea6ee3..000000000 --- a/core/src/p2p/p2p_manager_actor.rs +++ /dev/null @@ -1,131 +0,0 @@ -use crate::Node; - -use sd_p2p::{spacetunnel::Tunnel, Event, ManagerStream, Service, ServiceEvent}; - -use std::sync::Arc; - -use futures::StreamExt; -use tokio::sync::mpsc; -use tracing::error; - -use super::{operations, sync::SyncMessage, Header, LibraryMetadata, P2PEvent, P2PManager}; - -pub struct P2PManagerActor { - pub(super) manager: Arc, - pub(super) stream: ManagerStream, - pub(super) register_service_rx: mpsc::Receiver>>, -} - -impl P2PManagerActor { - pub fn start(self, node: Arc) { - let Self { - manager: this, - mut stream, - mut register_service_rx, - } = self; - - tokio::spawn({ - async move { - let mut node_rx = this.node.listen(); - - loop { - tokio::select! { - // TODO: We ignore the response of this but I suspect it will be useful in the future so it stays for now. 
- Some(_event) = register_service_rx.recv() => {}, - // TODO: We should subscribe to library-level events too but frontend isn't cut out for them right now. - Some(Ok(event)) = node_rx.next() => { - this.events.0 - .send(match event { - ServiceEvent::Discovered { identity, metadata } => - P2PEvent::DiscoveredPeer { - identity, - metadata, - }, - ServiceEvent::Expired { identity } => - P2PEvent::ExpiredPeer { - identity, - }, - }) - .map_err(|_| error!("Failed to send event to p2p event stream!")) - .ok(); - } - Some(event) = stream.next() => { - match event { - Event::PeerConnected(event) => { - this.events - .0 - .send(P2PEvent::ConnectedPeer { - identity: event.identity, - }) - .map_err(|_| error!("Failed to send event to p2p event stream!")) - .ok(); - } - Event::PeerDisconnected(identity) => { - this.events - .0 - .send(P2PEvent::DisconnectedPeer { identity }) - .map_err(|_| error!("Failed to send event to p2p event stream!")) - .ok(); - } - Event::PeerMessage(mut event) => { - let this = this.clone(); - let node = node.clone(); - - tokio::spawn(async move { - let header = Header::from_stream(&mut event.stream) - .await - .map_err(|err| { - error!("Failed to read header from stream: {}", err); - })?; - - match header { - Header::Ping => operations::ping::reciever(event).await, - Header::Spacedrop(req) => { - operations::spacedrop::reciever(&this, req, event).await? - } - Header::Sync(library_id) => { - let mut tunnel = - Tunnel::responder(event.stream).await.map_err(|err| { - error!("Failed `Tunnel::responder`: {}", err); - })?; - - let msg = - SyncMessage::from_stream(&mut tunnel).await.map_err(|err| { - error!("Failed `SyncMessage::from_stream`: {}", err); - })?; - - let library = - node.libraries.get_library(&library_id).await.ok_or_else(|| { - error!("Failed to get library '{library_id}'"); - - // TODO: Respond to remote client with warning! - })?; - - match msg { - SyncMessage::NewOperations => { - super::sync::responder(&mut tunnel, library).await?; - } - }; - } - Header::File(req) => { - operations::request_file::receiver(&node, req, event).await?; - } - } - - Ok::<_, ()>(()) - }); - } - Event::Shutdown => break, - _ => {} - } - } - } - } - - error!( - "Manager event stream closed! The core is unstable from this point forward!" 
- ); - } - }); - } -} diff --git a/core/src/p2p/protocol.rs b/core/src/p2p/protocol.rs index 492542968..ff9a89c24 100644 --- a/core/src/p2p/protocol.rs +++ b/core/src/p2p/protocol.rs @@ -1,8 +1,5 @@ -use sd_p2p::{ - proto::{decode, encode}, - spaceblock::{Range, SpaceblockRequests, SpaceblockRequestsError}, -}; - +use sd_p2p_block::{Range, SpaceblockRequests, SpaceblockRequestsError}; +use sd_p2p_proto::{decode, encode}; use thiserror::Error; use tokio::io::{AsyncRead, AsyncReadExt}; use uuid::Uuid; diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs index 51a500161..54187e8e5 100644 --- a/core/src/p2p/sync/mod.rs +++ b/core/src/p2p/sync/mod.rs @@ -5,10 +5,7 @@ use crate::{ sync::{self, GetOpsArgs}, }; -use sd_p2p::{ - proto::{decode, encode}, - spacetunnel::Tunnel, -}; +use sd_p2p_proto::{decode, encode}; use sd_sync::CRDTOperation; use std::sync::Arc; @@ -17,16 +14,18 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tracing::*; use uuid::Uuid; -use super::{Header, P2PManager}; +use super::P2PManager; mod proto; pub use proto::*; pub use originator::run as originator; mod originator { + use crate::p2p::Header; + use super::*; use responder::tx as rx; - use sd_p2p::PeerStatus; + use sd_p2p_tunnel::Tunnel; pub mod tx { use super::*; @@ -84,28 +83,19 @@ mod originator { /// REMEMBER: This only syncs one direction! pub async fn run(library_id: Uuid, sync: &Arc, p2p: &Arc) { - let service = p2p.get_library_service(&library_id).unwrap(); - - // TODO: Deduplicate any duplicate peer ids -> This is an edge case but still - for (remote_identity, status) in service.get_state() { - let PeerStatus::Connected = status else { + for (remote_identity, peer) in p2p.get_library_instances(&library_id) { + if !peer.is_connected() { continue; }; let sync = sync.clone(); - let p2p = p2p.clone(); - let service = service.clone(); tokio::spawn(async move { debug!( "Alerting peer '{remote_identity:?}' of new sync events for library '{library_id:?}'" ); - let mut stream = service - .connect(p2p.manager.clone(), &remote_identity) - .await - .map_err(|_| ()) - .unwrap(); // TODO: handle providing incorrect peer id + let mut stream = peer.new_stream().await.unwrap(); stream .write_all(&Header::Sync(library_id).to_bytes()) diff --git a/core/src/p2p/sync/proto.rs b/core/src/p2p/sync/proto.rs index 883708fcc..e586b631a 100644 --- a/core/src/p2p/sync/proto.rs +++ b/core/src/p2p/sync/proto.rs @@ -1,4 +1,4 @@ -use sd_p2p::proto::decode; +use sd_p2p_proto::decode; use tokio::io::{AsyncRead, AsyncReadExt}; // will probs have more variants in future diff --git a/core/src/util/maybe_undefined.rs b/core/src/util/maybe_undefined.rs index 4fe585efc..832b1d761 100644 --- a/core/src/util/maybe_undefined.rs +++ b/core/src/util/maybe_undefined.rs @@ -14,6 +14,11 @@ pub enum MaybeUndefined { } impl MaybeUndefined { + // `Undefined` will return `true` else `false`. + pub fn is_undefined(&self) -> bool { + matches!(self, Self::Undefined) + } + // `Null | Value(T)` will return `true` else `false`. 
pub fn is_defined(&self) -> bool { !matches!(self, Self::Undefined) diff --git a/crates/cloud-api/Cargo.toml b/crates/cloud-api/Cargo.toml index 76378cf60..9daa89966 100644 --- a/crates/cloud-api/Cargo.toml +++ b/crates/cloud-api/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true repository.workspace = true [dependencies] -sd-p2p = { path = "../p2p" } +sd-p2p2 = { path = "../p2p2" } reqwest = "0.11.22" serde.workspace = true serde_json.workspace = true diff --git a/crates/cloud-api/src/lib.rs b/crates/cloud-api/src/lib.rs index eac509bd7..ace66d389 100644 --- a/crates/cloud-api/src/lib.rs +++ b/crates/cloud-api/src/lib.rs @@ -3,7 +3,7 @@ pub mod auth; use std::{future::Future, sync::Arc}; use auth::OAuthToken; -use sd_p2p::spacetunnel::RemoteIdentity; +use sd_p2p2::RemoteIdentity; use serde::{Deserialize, Serialize}; use serde_json::json; use specta::Type; @@ -47,8 +47,11 @@ pub struct Instance { pub id: String, pub uuid: Uuid, pub identity: RemoteIdentity, + #[serde(rename = "nodeId")] pub node_id: Uuid, + #[serde(rename = "nodeName")] pub node_name: String, + #[serde(rename = "nodePlatform")] pub node_platform: u8, } @@ -197,10 +200,11 @@ pub mod library { use super::*; #[derive(Debug, Deserialize)] - pub struct Response { + pub struct CreateResult { pub id: String, } + #[allow(clippy::too_many_arguments)] pub async fn exec( config: RequestConfig, library_id: Uuid, @@ -210,7 +214,7 @@ pub mod library { node_id: Uuid, node_name: &str, node_platform: u8, - ) -> Result { + ) -> Result { let Some(auth_token) = config.auth_token else { return Err(Error("Authentication required".to_string())); }; diff --git a/crates/p2p-block/Cargo.toml b/crates/p2p-block/Cargo.toml new file mode 100644 index 000000000..858489493 --- /dev/null +++ b/crates/p2p-block/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "sd-p2p-block" +version = "0.1.0" +authors = ["Oscar Beaumont "] +license.workspace = true +edition.workspace = true +repository.workspace = true + +[dependencies] +sd-p2p2 = { path = "../p2p2" } +sd-p2p-proto = { path = "../p2p-proto" } +thiserror.workspace = true +tokio.workspace = true +tracing.workspace = true +uuid.workspace = true diff --git a/crates/p2p/src/spaceblock/block.rs b/crates/p2p-block/src/block.rs similarity index 98% rename from crates/p2p/src/spaceblock/block.rs rename to crates/p2p-block/src/block.rs index 7e552497a..be64f4e3c 100644 --- a/crates/p2p/src/spaceblock/block.rs +++ b/crates/p2p-block/src/block.rs @@ -58,7 +58,7 @@ impl<'a> Block<'a> { mod tests { use std::io::Cursor; - use crate::spaceblock::BlockSize; + use crate::BlockSize; use super::*; diff --git a/crates/p2p/src/spaceblock/block_size.rs b/crates/p2p-block/src/block_size.rs similarity index 100% rename from crates/p2p/src/spaceblock/block_size.rs rename to crates/p2p-block/src/block_size.rs diff --git a/crates/p2p/src/spaceblock/mod.rs b/crates/p2p-block/src/lib.rs similarity index 99% rename from crates/p2p/src/spaceblock/mod.rs rename to crates/p2p-block/src/lib.rs index ad252dfcd..8ffcd411d 100644 --- a/crates/p2p/src/spaceblock/mod.rs +++ b/crates/p2p-block/src/lib.rs @@ -1,3 +1,6 @@ +//! TODO +// TODO: Clippy lints here + //! Spaceblock is a file transfer protocol that uses a block based system to transfer files. //! This protocol is modelled after `SyncThing`'s BEP protocol. A huge thanks to it's original authors! //! 
You can read more about it here: @@ -21,10 +24,8 @@ use tokio::{ }; use tracing::debug; -use crate::{ - proto::{decode, encode}, - spacetime::UnicastStream, -}; +use sd_p2p2::UnicastStream; +use sd_p2p_proto::{decode, encode}; mod block; mod block_size; diff --git a/crates/p2p/src/spaceblock/sb_request.rs b/crates/p2p-block/src/sb_request.rs similarity index 99% rename from crates/p2p/src/spaceblock/sb_request.rs rename to crates/p2p-block/src/sb_request.rs index 35be4c2fa..81af2d0cc 100644 --- a/crates/p2p/src/spaceblock/sb_request.rs +++ b/crates/p2p-block/src/sb_request.rs @@ -4,7 +4,7 @@ use thiserror::Error; use tokio::io::{AsyncRead, AsyncReadExt}; use uuid::Uuid; -use crate::proto::{decode, encode}; +use sd_p2p_proto::{decode, encode}; use super::BlockSize; diff --git a/crates/p2p-proto/Cargo.toml b/crates/p2p-proto/Cargo.toml new file mode 100644 index 000000000..8ade7a363 --- /dev/null +++ b/crates/p2p-proto/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "sd-p2p-proto" +version = "0.1.0" +authors = ["Oscar Beaumont "] +license.workspace = true +edition.workspace = true +repository.workspace = true + +[dependencies] +ed25519-dalek = "2.1.0" +thiserror.workspace = true +tokio = { workspace = true, features = ["io-util"] } +uuid.workspace = true diff --git a/crates/p2p/src/proto.rs b/crates/p2p-proto/src/lib.rs similarity index 86% rename from crates/p2p/src/proto.rs rename to crates/p2p-proto/src/lib.rs index e9f0f75fc..0a8af1fe5 100644 --- a/crates/p2p/src/proto.rs +++ b/crates/p2p-proto/src/lib.rs @@ -2,11 +2,22 @@ //! //! Eventually these will be deprecated by macros but I can't find one which supports large payloads (basically it needs to write to async stream not in-memory bufffer) -> Binario is my own prototype of a Rust library to do this but it's not prod ready yet. //! + use thiserror::Error; use uuid::Uuid; +// TODO: Remove this from this crate cause it's a leak of responsibility. +#[derive(Debug, Error)] +#[error(transparent)] +pub enum SpaceTunnelIdentityErr { + #[error("{0}")] + Darlek(#[from] ed25519_dalek::ed25519::Error), + #[error("Invalid key length")] + InvalidKeyLength, +} + pub mod decode { - use crate::spacetunnel::IdentityErr; + use crate::SpaceTunnelIdentityErr; use super::{Error, Uuid}; use tokio::io::{AsyncRead, AsyncReadExt}; @@ -20,7 +31,7 @@ pub mod decode { #[error("NameFormatError({0})")] NameFormatError(#[from] std::string::FromUtf8Error), #[error("InvalidRemoteIdentity({0})")] - InvalidRemoteIdentity(#[from] IdentityErr), + InvalidRemoteIdentity(#[from] SpaceTunnelIdentityErr), } /// Deserialize uuid as it's fixed size data. diff --git a/crates/p2p-tunnel/Cargo.toml b/crates/p2p-tunnel/Cargo.toml new file mode 100644 index 000000000..734abd59e --- /dev/null +++ b/crates/p2p-tunnel/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "sd-p2p-tunnel" +version = "0.1.0" +authors = ["Oscar Beaumont "] +license.workspace = true +edition.workspace = true +repository.workspace = true + +[dependencies] +sd-p2p2 = { path = "../p2p2" } +tokio = { workspace = true, features = ["io-util"] } diff --git a/crates/p2p/src/spacetunnel/mod.rs b/crates/p2p-tunnel/src/lib.rs similarity index 67% rename from crates/p2p/src/spacetunnel/mod.rs rename to crates/p2p-tunnel/src/lib.rs index 2edf932b8..6353219ec 100644 --- a/crates/p2p/src/spacetunnel/mod.rs +++ b/crates/p2p-tunnel/src/lib.rs @@ -1,7 +1,6 @@ //! A system for creating encrypted tunnels between peers over untrusted connections. 
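The tunnel code is now consumed through `sd_p2p_tunnel::Tunnel`, which wraps a `sd_p2p2::UnicastStream`; the manager's sync handler calls `Tunnel::responder(stream)` before reading a `SyncMessage`. Below is a rough responder-side sketch, assuming `Tunnel` implements `AsyncRead` as that usage suggests; the 4-byte length read is purely illustrative and not part of any real protocol here.

```rust
// Rough sketch, not code from the diff: wraps an incoming `UnicastStream` in a
// `Tunnel` the way the manager's sync handler does, assuming `Tunnel`
// implements `AsyncRead` as that usage suggests. The 4-byte read is illustrative.
use sd_p2p2::UnicastStream;
use sd_p2p_tunnel::Tunnel;
use tokio::io::AsyncReadExt;

async fn handle_incoming(stream: UnicastStream) -> Result<(), ()> {
    let mut tunnel = Tunnel::responder(stream).await.map_err(|err| {
        eprintln!("Failed `Tunnel::responder`: {err}");
    })?;

    // Speak whatever protocol runs inside the encrypted tunnel; here we just
    // read a hypothetical little-endian length prefix.
    let mut len = [0u8; 4];
    tunnel.read_exact(&mut len).await.map_err(|_| ())?;
    println!("first frame is {} bytes", u32::from_le_bytes(len));
    Ok(())
}
```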
-mod identity; mod tunnel; -pub use identity::*; +pub use sd_p2p2::{Identity, IdentityErr, RemoteIdentity}; pub use tunnel::*; diff --git a/crates/p2p/src/spacetunnel/tunnel.rs b/crates/p2p-tunnel/src/tunnel.rs similarity index 97% rename from crates/p2p/src/spacetunnel/tunnel.rs rename to crates/p2p-tunnel/src/tunnel.rs index 7edd81ed2..fa6db3c73 100644 --- a/crates/p2p/src/spacetunnel/tunnel.rs +++ b/crates/p2p-tunnel/src/tunnel.rs @@ -6,7 +6,7 @@ use std::{ use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf}; -use crate::spacetime::UnicastStream; +use sd_p2p2::UnicastStream; #[derive(Debug)] pub struct Tunnel { diff --git a/crates/p2p/examples/basic.rs b/crates/p2p/examples/basic.rs deleted file mode 100644 index 82e4fc7be..000000000 --- a/crates/p2p/examples/basic.rs +++ /dev/null @@ -1,141 +0,0 @@ -// use std::{collections::HashMap, env, time::Duration}; - -// use sd_p2p::{Event, Keypair, Manager, Metadata}; -// use tokio::{io::AsyncReadExt, time::sleep}; -// use tracing::{debug, error, info}; - -// #[derive(Debug, Clone)] -// pub struct PeerMetadata { -// name: String, -// } - -// impl Metadata for PeerMetadata { -// fn to_hashmap(self) -> HashMap { -// HashMap::from([("name".to_owned(), self.name)]) -// } - -// fn from_hashmap(data: &HashMap) -> Result -// where -// Self: Sized, -// { -// Ok(Self { -// name: data -// .get("name") -// .ok_or_else(|| { -// "DNS record for field 'name' missing. Unable to decode 'PeerMetadata'!" -// .to_owned() -// })? -// .to_owned(), -// }) -// } -// } - -// #[tokio::main] -// async fn main() { -// tracing_subscriber::fmt() -// .with_env_filter( -// tracing_subscriber::EnvFilter::from_default_env() -// .add_directive("basic=trace".parse().unwrap()) -// .add_directive("sd-p2p=trace".parse().unwrap()) -// .add_directive("info".parse().unwrap()), -// ) -// .try_init() -// .unwrap(); - -// let keypair = Keypair::generate(); - -// let metadata_manager = MetadataManager::new(PeerMetadata { -// name: "TODO".to_string(), -// }); - -// let (manager, mut stream) = Manager::new("p2p-demo", &keypair, Default::default()) -// .await -// .unwrap(); - -// info!( -// "Node '{}' is now online listening at addresses: {:?}", -// manager.identity(), -// stream.listen_addrs() -// ); - -// tokio::spawn(async move { -// let mut shutdown = false; -// // Your application must keeping poll this stream to keep the P2P system running -// while let Some(event) = stream.next().await { -// match event { -// // TODO: Refactor example to use `Service` struct -// // Event::PeerDiscovered(event) => { -// // println!( -// // "Discovered peer by id '{}' with address '{:?}' and metadata: {:?}", -// // event.peer_id, event.addresses, event.metadata -// // ); -// // event.dial().await; // We connect to everyone we find on the network. Your app will probs wanna restrict this! 
-// // } -// Event::PeerMessage(mut event) => { -// debug!("Peer '{}' established unicast stream", event.identity); - -// tokio::spawn(async move { -// let mut buf = [0; 100]; -// let n = event.stream.read(&mut buf).await.unwrap(); -// println!("GOT UNICAST: {:?}", std::str::from_utf8(&buf[..n]).unwrap()); -// }); -// } -// Event::PeerBroadcast(mut event) => { -// debug!("Peer '{}' established broadcast stream", event.identity); - -// tokio::spawn(async move { -// let mut buf = [0; 100]; -// let n = event.stream.read(&mut buf).await.unwrap(); -// println!( -// "GOT BROADCAST: {:?}", -// std::str::from_utf8(&buf[..n]).unwrap() -// ); -// }); -// } -// Event::Shutdown => { -// info!("Manager shutdown!"); -// shutdown = true; -// break; -// } -// _ => debug!("event: {:?}", event), -// } -// } - -// if !shutdown { -// error!("Manager event stream closed! The core is unstable from this point forward!"); -// // process.exit(1); // TODO: Should I? -// } -// }); - -// if env::var("PING").as_deref() != Ok("skip") { -// let manager = manager.clone(); -// tokio::spawn(async move { -// sleep(Duration::from_millis(500)).await; - -// // Send pings to every client every 3 second after startup -// loop { -// sleep(Duration::from_secs(3)).await; -// manager -// .broadcast( -// format!("Hello World From {}", keypair.peer_id()) -// .as_bytes() -// .to_vec(), -// ) -// .await; -// debug!("Sent ping broadcast to all connected peers!"); -// } -// }); -// } - -// // TODO: proper shutdown -// // https://docs.rs/ctrlc/latest/ctrlc/ -// // https://docs.rs/system_shutdown/latest/system_shutdown/ - -// tokio::time::sleep(Duration::from_secs(100)).await; - -// manager.shutdown().await; // It is super highly recommended to shutdown the manager before exiting your application so an Mdns update can be broadcasted -// } - -fn main() { - todo!("TODO: Update example"); -} diff --git a/crates/p2p/src/discovery/manager.rs b/crates/p2p/src/discovery/manager.rs deleted file mode 100644 index d216fed18..000000000 --- a/crates/p2p/src/discovery/manager.rs +++ /dev/null @@ -1,156 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - future::poll_fn, - net::SocketAddr, - sync::{Arc, PoisonError, RwLock}, - task::Poll, -}; - -use libp2p::PeerId; -use tokio::sync::{broadcast, mpsc}; -use tracing::trace; - -use crate::{spacetunnel::RemoteIdentity, ManagerConfig, Mdns, ServiceEventInternal}; - -type ServiceName = String; - -pub type ListenAddrs = HashSet; -pub type State = Arc>; - -/// `DiscoveryManager` controls all user-defined [Service]'s and connects them with the network through mDNS and other discovery protocols -pub struct DiscoveryManager { - pub(crate) state: State, - pub(crate) listen_addrs: ListenAddrs, - pub(crate) application_name: &'static str, - pub(crate) identity: RemoteIdentity, - pub(crate) peer_id: PeerId, - pub(crate) mdns: Option, - // TODO: Split these off `DiscoveryManagerState` and parse around on their own struct??? 
- pub(crate) do_broadcast_rx: broadcast::Receiver<()>, - pub(crate) service_shutdown_rx: mpsc::Receiver, -} - -impl DiscoveryManager { - pub(crate) fn new( - application_name: &'static str, - identity: RemoteIdentity, - peer_id: PeerId, - config: &ManagerConfig, - state: State, - service_shutdown_rx: mpsc::Receiver, - ) -> Result { - let mut mdns = None; - if config.enabled { - mdns = Some(Mdns::new(application_name, identity, peer_id)?); - } - - let do_broadcast_rx = state - .read() - .unwrap_or_else(PoisonError::into_inner) - .do_broadcast - .subscribe(); - - Ok(Self { - state, - listen_addrs: Default::default(), - application_name, - identity, - peer_id, - mdns, - do_broadcast_rx, - service_shutdown_rx, - }) - } - - /// is called on changes to `self.services` to make sure all providers update their records - pub(crate) fn do_advertisement(&mut self) { - trace!("Broadcasting new service records"); - - if let Some(mdns) = &mut self.mdns { - mdns.do_advertisement(&self.listen_addrs, &self.state); - } - } - - pub(crate) async fn poll(&mut self) { - tokio::select! { - _ = self.do_broadcast_rx.recv() => self.do_advertisement(), - service_name = self.service_shutdown_rx.recv() => { - if let Some(service_name) = service_name { - let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner); - state.services.remove(&service_name); - state.discovered.remove(&service_name); - state.known.remove(&service_name); - } - - // TODO - - self.do_advertisement(); - } - () = poll_fn(|cx| { - if let Some(mdns) = &mut self.mdns { - return mdns.poll(cx, &self.listen_addrs, &self.state); - } - - Poll::Pending - }) => {}, - } - } - - pub(crate) fn shutdown(&self) { - if let Some(mdns) = &self.mdns { - mdns.shutdown(); - } - } -} - -#[derive(Debug, Clone)] -#[allow(clippy::type_complexity)] -pub struct DiscoveryManagerState { - /// A list of services the current node is advertising w/ their metadata - pub(crate) services: HashMap< - ServiceName, - ( - broadcast::Sender<(String, ServiceEventInternal)>, - // Will be `None` prior to the first `.set` call - Option>, - ), - >, - /// A map of organically discovered peers - pub(crate) discovered: HashMap>, - /// A map of peers we know about. These may be connected or not avaiable. - /// This is designed around the Relay/NAT hole punching service where we need to emit who we wanna discover - /// Note: this may contain duplicates with `discovered` as they will *not* be removed from here when found - pub(crate) known: HashMap>, - /// Used to trigger an rebroadcast. This should be called when mutating this struct. - /// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it. - /// This is a channel with a capacity of 1. If sending fails we know someone else has already requested broadcast and we can ignore the error. - pub(crate) do_broadcast: broadcast::Sender<()>, - /// Used to trigger the removal of a `Service`. This is used in the `impl Drop for Service` - /// You are intended to clone out of this instead of locking the whole struct's `RwLock` each time you wanna use it. 
- pub(crate) service_shutdown_tx: mpsc::Sender, -} - -impl DiscoveryManagerState { - #[must_use] - pub fn new() -> (Arc>, mpsc::Receiver) { - let (service_shutdown_tx, service_shutdown_rx) = mpsc::channel(10); - - ( - Arc::new(RwLock::new(Self { - services: Default::default(), - discovered: Default::default(), - known: Default::default(), - do_broadcast: broadcast::channel(1).0, - service_shutdown_tx, - })), - service_shutdown_rx, - ) - } -} - -#[derive(Debug, Clone)] -pub struct DiscoveredPeerCandidate { - pub(crate) peer_id: PeerId, - pub(crate) meta: HashMap, - pub(crate) addresses: Vec, -} diff --git a/crates/p2p/src/discovery/mdns.rs b/crates/p2p/src/discovery/mdns.rs deleted file mode 100644 index 979a38e4d..000000000 --- a/crates/p2p/src/discovery/mdns.rs +++ /dev/null @@ -1,367 +0,0 @@ -use std::{ - collections::HashMap, - net::SocketAddr, - pin::Pin, - str::FromStr, - sync::PoisonError, - task::{Context, Poll}, - thread::sleep, - time::Duration, -}; - -use futures_core::Stream; -use libp2p::{ - futures::{FutureExt, StreamExt}, - PeerId, -}; -use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo}; -use streamunordered::{StreamUnordered, StreamYield}; -use tokio::time::{sleep_until, Instant, Sleep}; -use tracing::{error, trace, warn}; - -use crate::{ - spacetunnel::RemoteIdentity, DiscoveredPeerCandidate, ListenAddrs, ServiceEventInternal, State, -}; - -/// TODO -const MDNS_READVERTISEMENT_INTERVAL: Duration = Duration::from_secs(60); // Every minute re-advertise - -pub struct Mdns { - identity: RemoteIdentity, - peer_id: PeerId, - service_name: String, - advertised_services: Vec, - mdns_daemon: ServiceDaemon, - next_mdns_advertisement: Pin>, - // This is an ugly workaround for: https://github.com/keepsimple1/mdns-sd/issues/145 - mdns_rx: StreamUnordered, - // This is hacky but it lets us go from service name back to `RemoteIdentity` when removing the service. - // During service removal we only have the service name (not metadata) but during service discovery we insert into this map. - tracked_services: HashMap, -} - -struct TrackedService { - service_name: String, - identity: RemoteIdentity, -} - -impl Mdns { - pub(crate) fn new( - application_name: &'static str, - identity: RemoteIdentity, - peer_id: PeerId, - ) -> Result { - let mdns_daemon = ServiceDaemon::new()?; - - Ok(Self { - identity, - peer_id, - service_name: format!("_{application_name}._udp.local."), - advertised_services: Vec::new(), - mdns_daemon, - next_mdns_advertisement: Box::pin(sleep_until(Instant::now())), // Trigger an advertisement immediately - mdns_rx: StreamUnordered::new(), - tracked_services: HashMap::new(), - }) - } - - /// Do an mdns advertisement to the network. 
- pub(super) fn do_advertisement(&mut self, listen_addrs: &ListenAddrs, state: &State) { - trace!("doing mDNS advertisement!"); - - // TODO: Second stage rate-limit - - let mut ports_to_service = HashMap::new(); - for addr in listen_addrs { - ports_to_service - .entry(addr.port()) - .or_insert_with(Vec::new) - .push(addr.ip()); - } - - // This method takes `&mut self` so we know we have exclusive access to `advertised_services` - let mut advertised_services_to_remove = self.advertised_services.clone(); - - let state = state.read().unwrap_or_else(PoisonError::into_inner); - for (port, ips) in ports_to_service { - for (service_name, (_, metadata)) in &state.services { - let Some(metadata) = metadata else { - continue; - }; - - let mut meta = metadata.clone(); - meta.insert("__peer_id".into(), self.peer_id.to_string()); - meta.insert("__service".into(), service_name.to_string()); - meta.insert("__identity".into(), self.identity.to_string()); - - // The max length of an MDNS record is painful so we just hash the data to come up with a pseudo-random but deterministic value. - // The full values are stored within TXT records. - let my_name = String::from_utf8_lossy(&base91::slice_encode( - sha256::digest(format!("{}_{}", service_name, self.identity)).as_bytes(), - ))[..63] - .to_string(); - - let service_domain = format!("_{service_name}._sub.{}", self.service_name); - let service = match ServiceInfo::new( - &service_domain, - &my_name[..63], // 63 as long as the mDNS spec will allow us - &format!("{}.{}.", service_name, self.identity), // TODO: Should this change??? - &*ips, - port, - Some(meta.clone()), // TODO: Prevent the user defining a value that overflows a DNS record - ) { - Ok(service) => service, // TODO: .enable_addr_auto(), // TODO: using autoaddrs or not??? 
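The instance-name derivation above hashes `"{service_name}_{identity}"` and truncates the encoding to 63 bytes, the mDNS label limit, so the full values can live in TXT records instead. The sketch below illustrates the same "deterministic, pseudo-random, length-capped name" idea using only std's hasher; the real code uses SHA-256 plus base91, so treat this as a shape, not the actual output format.

```rust
// Hedged sketch: derive a short, deterministic instance name from the service
// name and identity, capped at 63 bytes. std's DefaultHasher stands in for
// the SHA-256 + base91 pipeline used by the removed code.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn instance_name(service_name: &str, identity: &str) -> String {
    let mut hasher = DefaultHasher::new();
    format!("{service_name}_{identity}").hash(&mut hasher);
    // Deterministic, pseudo-random, and comfortably under the 63-byte limit.
    format!("{:016x}", hasher.finish()).chars().take(63).collect()
}

fn main() {
    println!("{}", instance_name("libraries", "remote-identity-goes-here"));
}
```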
- Err(err) => { - warn!("error creating mdns service info: {}", err); - continue; - } - }; - - let service_name = service.get_fullname().to_string(); - advertised_services_to_remove.retain(|s| *s != service_name); - self.advertised_services.push(service_name); - - if !self - .mdns_rx - .iter_with_token() - .any(|(s, _)| s.1 == service_domain) - { - let service = match self.mdns_daemon.browse(&service_domain) { - Ok(v) => v, - Err(err) => { - error!("error browsing mdns service: {}", err); - return; - } - }; - self.mdns_rx - .insert(MdnsRecv(service.into_stream(), service_domain)); - } - - // TODO: Do a proper diff and remove old services - trace!("advertising mdns service: {:?}", service); - match self.mdns_daemon.register(service) { - Ok(()) => {} - Err(err) => warn!("error registering mdns service: {}", err), - } - } - } - - for service_domain in advertised_services_to_remove { - if let Some((_, token)) = self - .mdns_rx - .iter_with_token() - .find(|(s, _)| s.1 == service_domain) - { - Pin::new(&mut self.mdns_rx).remove(token); - } - if let Err(err) = self.mdns_daemon.unregister(&service_domain) { - warn!("error unregistering mdns service: {}", err); - } - } - - // If mDNS advertisement is not queued in future, queue one - if self.next_mdns_advertisement.is_elapsed() { - self.next_mdns_advertisement = - Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL)); - } - } - - pub(crate) fn poll( - &mut self, - cx: &mut Context<'_>, - listen_addrs: &ListenAddrs, - state: &State, - ) -> Poll<()> { - let mut is_pending = false; - while !is_pending { - match self.next_mdns_advertisement.poll_unpin(cx) { - Poll::Ready(()) => self.do_advertisement(listen_addrs, state), - Poll::Pending => is_pending = true, - } - - match self.mdns_rx.poll_next_unpin(cx) { - Poll::Ready(Some((result, _))) => match result { - StreamYield::Item(event) => self.on_event(event, state), - StreamYield::Finished(_) => {} - }, - Poll::Ready(None) => {} - Poll::Pending => is_pending = true, - } - } - - Poll::Pending - } - - fn on_event(&mut self, event: ServiceEvent, state: &State) { - match event { - ServiceEvent::SearchStarted(_) => {} - ServiceEvent::ServiceFound(_, _) => {} - ServiceEvent::ServiceResolved(info) => { - let Some(service_name) = info.get_properties().get("__service") else { - warn!( - "resolved mDNS peer advertising itself with missing '__service' metadata" - ); - return; - }; - let service_name = service_name.val_str(); - - let Some(identity) = info.get_properties().get("__identity") else { - warn!( - "resolved mDNS peer advertising itself with missing '__identity' metadata" - ); - return; - }; - let identity = identity.val_str(); - - println!("\t {:?} {:?}", info.get_fullname(), self.service_name); // TODO - - // if !service_type.ends_with(&self.service_name) { - // warn!( - // "resolved mDNS peer advertising itself with invalid service type '{service_type}'" - // ); - // return; - // } - - let Ok(identity) = RemoteIdentity::from_str(identity) else { - warn!("resolved peer advertising itself with an invalid RemoteIdentity('{identity}')"); - return; - }; - - // Prevent discovery of the current peer. 
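`Mdns::poll` above drives both the re-advertisement timer and the mDNS event stream in a loop until every sub-source reports `Pending`, and the method itself only ever returns `Pending` so the caller can park it inside `poll_fn`/`select!`. A simplified, waker-free sketch of that control flow, with fake sources standing in for the timer and the daemon stream:

```rust
// Sketch of the "drive every sub-source until all of them are Pending" shape
// used by `Mdns::poll`. The sources are simplified stand-ins; real code relies
// on wakers registered by the inner futures/streams to be polled again.
use std::task::Poll;

struct FakeSource {
    items: Vec<&'static str>,
}

impl FakeSource {
    // Ready while there is buffered work, Pending once drained.
    fn poll_next(&mut self) -> Poll<&'static str> {
        match self.items.pop() {
            Some(item) => Poll::Ready(item),
            None => Poll::Pending,
        }
    }
}

fn poll_discovery(timer: &mut FakeSource, mdns: &mut FakeSource) -> Poll<()> {
    let mut is_pending = false;
    while !is_pending {
        is_pending = true;
        if let Poll::Ready(ev) = timer.poll_next() {
            println!("timer fired: {ev}");
            is_pending = false;
        }
        if let Poll::Ready(ev) = mdns.poll_next() {
            println!("mdns event: {ev}");
            is_pending = false;
        }
    }
    // Only ever Pending: discovery never finishes of its own accord.
    Poll::Pending
}

fn main() {
    let mut timer = FakeSource { items: vec!["advertise"] };
    let mut mdns = FakeSource { items: vec!["resolved", "found"] };
    assert!(poll_discovery(&mut timer, &mut mdns).is_pending());
}
```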
- if identity == self.identity { - return; - } - - self.tracked_services.insert( - info.get_fullname().to_string(), - TrackedService { - service_name: service_name.to_string(), - identity, - }, - ); - - let mut meta = info - .get_properties() - .iter() - .map(|v| (v.key().to_owned(), v.val_str().to_owned())) - .collect::>(); - - let Some(peer_id) = meta.remove("__peer_id") else { - warn!( - "resolved mDNS peer advertising itself with missing '__peer_id' metadata" - ); - return; - }; - let Ok(peer_id) = PeerId::from_str(&peer_id) else { - warn!( - "resolved mDNS peer advertising itself with invalid '__peer_id' metadata" - ); - return; - }; - - let mut state = state.write().unwrap_or_else(PoisonError::into_inner); - - if let Some((tx, _)) = state.services.get_mut(service_name) { - if let Err(err) = tx.send(( - service_name.to_string(), - ServiceEventInternal::Discovered { - identity, - metadata: meta.clone(), - }, - )) { - warn!( - "error sending mDNS service event to '{service_name}' channel: {err}" - ); - } - } else { - warn!( - "mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!" - ); - } - - if let Some(discovered) = state.discovered.get_mut(service_name) { - discovered.insert( - identity, - DiscoveredPeerCandidate { - peer_id, - meta, - addresses: info - .get_addresses() - .iter() - .map(|addr| SocketAddr::new(*addr, info.get_port())) - .collect(), - }, - ); - } else { - warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!"); - } - } - ServiceEvent::ServiceRemoved(_, fullname) => { - let Some(TrackedService { - service_name, - identity, - }) = self.tracked_services.remove(&fullname) - else { - warn!( - "resolved mDNS peer deadvertising itself without having been discovered!" - ); - return; - }; - let mut state = state.write().unwrap_or_else(PoisonError::into_inner); - - if let Some((tx, _)) = state.services.get_mut(&service_name) { - if let Err(err) = tx.send(( - service_name.to_string(), - ServiceEventInternal::Expired { identity }, - )) { - warn!("error sending mDNS service event '{service_name}': {err}"); - } - } else { - warn!( - "mDNS service '{service_name}' is missing from 'state.services'. This is likely a bug!" - ); - } - - if let Some(discovered) = state.discovered.get_mut(&service_name) { - discovered.remove(&identity); - } else { - warn!("mDNS service '{service_name}' is missing from 'state.discovered'. This is likely a bug!"); - } - } - ServiceEvent::SearchStopped(_) => {} - } - } - - pub(crate) fn shutdown(&self) { - for service in &self.advertised_services { - self.mdns_daemon - .unregister(service) - .map_err(|err| { - error!("error removing mdns service '{service}': {err}"); - }) - .ok(); - } - - // TODO: Without this mDNS is not sending it goodbye packets without a timeout. Try and remove this cause it makes shutdown slow. 
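The resolution handler above splits the reserved `__peer_id` / `__identity` / `__service` keys out of the TXT-record properties before the remainder is forwarded to the application as metadata. A std-only sketch of that split, with made-up values:

```rust
// Sketch of separating the reserved routing keys from application-defined
// metadata carried in the mDNS TXT records. Values are illustrative.
use std::collections::HashMap;

fn main() {
    let mut meta: HashMap<String, String> = HashMap::from([
        ("__peer_id".into(), "12D3KooW...".into()),
        ("__identity".into(), "remote-identity".into()),
        ("__service".into(), "libraries".into()),
        ("name".into(), "Oscar's laptop".into()),
    ]);

    let peer_id = meta.remove("__peer_id");
    let identity = meta.remove("__identity");
    let service = meta.remove("__service");

    // Whatever is left is application-defined metadata.
    println!("{peer_id:?} {identity:?} {service:?} -> {meta:?}");
}
```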
- sleep(Duration::from_millis(100)); - - match self.mdns_daemon.shutdown() { - Ok(chan) => { - let _ = chan.recv(); - } - Err(err) => { - error!("error shutting down mdns daemon: {err}"); - } - } - } -} - -struct MdnsRecv(flume::r#async::RecvStream<'static, ServiceEvent>, String); - -impl Stream for MdnsRecv { - type Item = ServiceEvent; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.0.poll_next_unpin(cx) - } -} diff --git a/crates/p2p/src/discovery/mod.rs b/crates/p2p/src/discovery/mod.rs deleted file mode 100644 index 4eeb89ff7..000000000 --- a/crates/p2p/src/discovery/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod manager; -mod mdns; -mod service; - -pub use manager::*; -pub use mdns::*; -pub use service::*; diff --git a/crates/p2p/src/discovery/service.rs b/crates/p2p/src/discovery/service.rs deleted file mode 100644 index e669086b1..000000000 --- a/crates/p2p/src/discovery/service.rs +++ /dev/null @@ -1,311 +0,0 @@ -use std::{ - collections::HashMap, - marker::PhantomData, - pin::Pin, - sync::{Arc, PoisonError, RwLock}, - task::{Context, Poll}, -}; - -use futures_core::Stream; -use libp2p::futures::StreamExt; -use pin_project_lite::pin_project; -use thiserror::Error; -use tokio::sync::{broadcast, mpsc}; -use tokio_stream::wrappers::{errors::BroadcastStreamRecvError, BroadcastStream}; -use tracing::warn; - -use crate::{ - spacetime::{UnicastStream, UnicastStreamError}, - spacetunnel::RemoteIdentity, - DiscoveredPeer, DiscoveryManagerState, Manager, Metadata, -}; - -/// A Service represents a thing your application exposes to the network that can be discovered and connected to. -pub struct Service { - name: String, - state: Arc>, - do_broadcast: broadcast::Sender<()>, - service_shutdown_tx: mpsc::Sender, - manager: Arc, - phantom: PhantomData TMeta>, -} - -impl Service { - // Construct a new service. This will not cause an advertisement until [Self::update] is called! - pub fn new( - name: impl Into, - manager: Arc, - ) -> Result { - let name = name.into(); - let state = manager.discovery_state.clone(); - let (do_broadcast, service_shutdown_tx) = { - let mut state = state.write().unwrap_or_else(PoisonError::into_inner); - if state.services.contains_key(&name) { - return Err(ErrDuplicateServiceName); - } - state.discovered.insert(name.clone(), Default::default()); - state - .services - .insert(name.clone(), (broadcast::channel(20).0, Default::default())); - ( - state.do_broadcast.clone(), - state.service_shutdown_tx.clone(), - ) - }; - - // TODO: We call this but it won't have metadata set so it won't actually expose it - // However, it must be called to properly setup the listener (at least right now) - do_broadcast.send(()).ok(); - - Ok(Self { - name, - state, - do_broadcast, - service_shutdown_tx, - manager, - phantom: PhantomData, - }) - } - - #[must_use] - pub fn name(&self) -> &str { - &self.name - } - - pub fn update(&self, meta: TMeta) { - if let Some((_, services_meta)) = self - .state - .write() - .unwrap_or_else(PoisonError::into_inner) - .services - .get_mut(&self.name) - { - let meta = meta.to_hashmap(); - let did_change = services_meta.as_ref().is_some_and(|v| *v == meta); - *services_meta = Some(meta); - - if did_change { - self.do_broadcast.send(()).ok(); - } - } else { - warn!( - "Service::update called on non-existent service '{}'. 
This indicates a major bug in P2P!", - self.name - ); - } - } - - pub fn get_state(&self) -> HashMap { - let connected = self - .manager - .state - .read() - .unwrap_or_else(PoisonError::into_inner) - .connected - .values() - .map(|remote_identity| (*remote_identity, PeerStatus::Connected)) - .collect::>(); - - let state = self.state.read().unwrap_or_else(PoisonError::into_inner); - state - .known - .get(&self.name) - .into_iter() - .flatten() - .map(|remote_identity| (*remote_identity, PeerStatus::Unavailable)) - // We do these after the `Unavailable` to replace the keys that are in both - .chain(connected) - .chain( - state - .discovered - .get(&self.name) - .into_iter() - .flatten() - .map(|(remote_identity, _)| (*remote_identity, PeerStatus::Discovered)), - ) - .collect::>() - } - - pub fn add_known(&self, identity: Vec) { - self.state - .write() - .unwrap_or_else(PoisonError::into_inner) - .known - .entry(self.name.clone()) - .or_default() - .extend(identity); - - // TODO: Probally signal to discovery manager that we have new known peers -> This will be need for Relay but not for mDNS - } - - // TODO: Remove in favor of `get_state` maybe??? - pub fn get_discovered(&self) -> Vec> { - self.state - .read() - .unwrap_or_else(PoisonError::into_inner) - .discovered - .get(&self.name) - .into_iter() - .flatten() - .filter_map(|(i, p)| { - let metadata = match TMeta::from_hashmap(&p.meta) { - Ok(m) => m, - Err(err) => { - warn!("Failed to deserialize metadata for peer '{i:?}': {err}"); - return None; - } - }; - - Some(DiscoveredPeer { - identity: *i, - peer_id: p.peer_id, - metadata, - addresses: p.addresses.clone(), - }) - }) - .collect::>() - } - - pub async fn connect( - &self, - manager: Arc, - identity: &RemoteIdentity, - ) -> Result { - let candidate = { - let state = self.state.read().unwrap_or_else(PoisonError::into_inner); - let (_, candidate) = state - .discovered - .get(&self.name) - .ok_or(UnicastStreamError::ErrPeerIdNotFound(*identity))? - .iter() - .find(|(i, _)| *i == identity) - .ok_or(UnicastStreamError::ErrPeerIdNotFound(*identity))?; - candidate.clone() - }; - - let stream = manager.stream_inner(candidate.peer_id).await?; // TODO: handle providing incorrect peer id - Ok(stream) - } - - #[allow(clippy::panic)] // This is a `.expect` (which is allowd) but with formatting - pub fn listen(&self) -> ServiceSubscription { - ServiceSubscription { - name: self.name.clone(), - rx: BroadcastStream::new( - self.state - .read() - .unwrap_or_else(PoisonError::into_inner) - .services - .get(&self.name) - .unwrap_or_else(|| panic!("Service '{}' not found in service map", self.name)) - .0 - .subscribe(), - ), - phantom: PhantomData, - } - } -} - -impl Drop for Service { - fn drop(&mut self) { - if self - .service_shutdown_tx - .try_send(self.name.clone()) - .is_err() - { - // TODO: This will happen on shutdown due to the shutdown order. Try and fix that! - // Functionally all services are shutdown by the manager so this is a cosmetic fix. - warn!( - "Service::drop could not be called on '{}'. 
This indicates contention on the service shutdown channel and will result in out-of-date services being broadcasted.", - self.name - ); - } - } -} - -#[derive(Debug, Error)] -#[error("a service has already been mounted with this name")] -pub struct ErrDuplicateServiceName; - -#[derive(Debug, Clone, Copy)] -#[cfg_attr(feature = "specta", derive(specta::Type))] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -pub enum PeerStatus { - Unavailable, - Discovered, - Connected, -} - -#[derive(Debug, Clone)] -#[cfg_attr(feature = "specta", derive(specta::Type))] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -pub enum ServiceEvent { - Discovered { - identity: RemoteIdentity, - metadata: TMeta, - }, - Expired { - identity: RemoteIdentity, - }, -} - -// Type-erased version of [ServiceEvent]. -#[derive(Debug, Clone)] -pub enum ServiceEventInternal { - Discovered { - identity: RemoteIdentity, - metadata: HashMap, - }, - Expired { - identity: RemoteIdentity, - }, -} - -impl TryFrom for ServiceEvent { - type Error = String; - - fn try_from(value: ServiceEventInternal) -> Result { - Ok(match value { - ServiceEventInternal::Discovered { identity, metadata } => Self::Discovered { - identity, - metadata: TMeta::from_hashmap(&metadata)?, - }, - ServiceEventInternal::Expired { identity } => Self::Expired { identity }, - }) - } -} - -pin_project! { - pub struct ServiceSubscription { - name: String, - rx: BroadcastStream<(String, ServiceEventInternal)>, - phantom: PhantomData, - } -} - -impl Stream for ServiceSubscription { - type Item = Result, BroadcastStreamRecvError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - return match self.rx.poll_next_unpin(cx) { - Poll::Ready(Some(Ok((name, event)))) => { - if name != self.name { - continue; - } - - match event.try_into() { - Ok(result) => Poll::Ready(Some(Ok(result))), - Err(err) => { - warn!("error decoding into TMeta for service '{name}': {err}"); - continue; // TODO: This could *technically* cause stravation. Should this error be thrown outta the stream instead? - } - } - } - Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - }; - } - } -} diff --git a/crates/p2p/src/event.rs b/crates/p2p/src/event.rs deleted file mode 100644 index c72d86059..000000000 --- a/crates/p2p/src/event.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::{net::SocketAddr, sync::Arc}; - -use crate::{spacetime::UnicastStream, spacetunnel::RemoteIdentity, ConnectedPeer, Manager}; - -/// represents an event coming from the network manager. -/// This is useful for updating your UI when stuff changes on the backend. -/// You can also interact with some events to cause an event. -#[derive(Debug)] -pub enum Event { - /// add a network interface on this node to listen for - AddListenAddr(SocketAddr), - /// remove a network interface from this node so that we don't listen to it - RemoveListenAddr(SocketAddr), - /// communication was established with a peer. - /// Theere could actually be multiple connections under the hood but we smooth it over in this API. - PeerConnected(ConnectedPeer), - /// communication was lost with a peer. 
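`Service::get_state` above relies on a small but easy-to-miss property: when an iterator of key/value pairs is collected into a `HashMap`, later entries overwrite earlier ones, so chaining known → connected → discovered gives the later statuses priority. A tiny sketch of that precedence trick with string keys standing in for identities:

```rust
// Sketch of the precedence behaviour behind `get_state`: collect() keeps the
// *last* value seen for a key, so chained iterators upgrade a peer's status.
use std::collections::HashMap;

#[derive(Debug, Clone, Copy)]
enum PeerStatus {
    Unavailable,
    Discovered,
    Connected,
}

fn main() {
    let known = ["a", "b", "c"];
    let connected = ["b"];
    let discovered = ["c"];

    let state: HashMap<&str, PeerStatus> = known
        .iter()
        .map(|k| (*k, PeerStatus::Unavailable))
        .chain(connected.iter().map(|k| (*k, PeerStatus::Connected)))
        .chain(discovered.iter().map(|k| (*k, PeerStatus::Discovered)))
        .collect();

    // "a" stays Unavailable; "b" and "c" are upgraded by the later entries.
    println!("{state:?}");
}
```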
- PeerDisconnected(RemoteIdentity), - /// the peer has opened a new unicast substream - PeerMessage(PeerMessageEvent), - /// the node is shutting down - Shutdown, -} - -#[derive(Debug)] -pub struct PeerMessageEvent { - pub stream_id: u64, - pub identity: RemoteIdentity, - pub manager: Arc, - pub stream: UnicastStream, - // Prevent manual creation by end-user - pub(crate) _priv: (), -} - -impl From for Event { - fn from(event: PeerMessageEvent) -> Self { - Self::PeerMessage(event) - } -} diff --git a/crates/p2p/src/lib.rs b/crates/p2p/src/lib.rs deleted file mode 100644 index 8c03d7cc0..000000000 --- a/crates/p2p/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Rust Peer to Peer Networking Library -#![warn(clippy::all, clippy::unwrap_used, clippy::panic)] - -mod discovery; -mod event; -mod manager; -mod manager_stream; -mod peer; -pub mod proto; -pub mod spaceblock; -pub mod spacetime; -pub mod spacetunnel; -mod utils; - -pub use discovery::*; -pub use event::*; -pub use manager::*; -pub use manager_stream::*; -pub use peer::*; -pub use utils::*; - -// TODO: Remove this -#[doc(hidden)] -pub mod internal { - pub use libp2p::PeerId; -} diff --git a/crates/p2p/src/manager.rs b/crates/p2p/src/manager.rs deleted file mode 100644 index 24447451a..000000000 --- a/crates/p2p/src/manager.rs +++ /dev/null @@ -1,359 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - convert::Infallible, - fmt, - net::SocketAddr, - sync::{ - atomic::{AtomicBool, AtomicU64}, - Arc, PoisonError, RwLock, - }, -}; - -use libp2p::{ - core::{muxing::StreamMuxerBox, transport::ListenerId, ConnectedPoint}, - PeerId, SwarmBuilder, Transport, -}; -use serde::{Deserialize, Serialize}; -use specta::Type; -use thiserror::Error; -use tokio::sync::{mpsc, oneshot}; -use tracing::{error, warn}; - -use crate::{ - spacetime::{SpaceTime, UnicastStream, UnicastStreamError}, - spacetunnel::{Identity, RemoteIdentity}, - DiscoveryManager, DiscoveryManagerState, Keypair, ManagerStream, ManagerStreamAction, - ManagerStreamAction2, -}; - -// State of the manager that may infrequently change -// These are broken out so updates to them can be done in sync (With single RwLock lock) -#[derive(Debug)] -pub struct DynamicManagerState { - pub(crate) config: ManagerConfig, - pub(crate) ipv4_listener_id: Option>, - pub(crate) ipv4_port: Option, - pub(crate) ipv6_listener_id: Option>, - pub(crate) ipv6_port: Option, - // A map of connected clients. - // This includes both inbound and outbound connections! - pub(crate) connected: HashMap, - // TODO: Removing this would be nice. It's a hack to things working after removing the `PeerId` from public API. - pub(crate) connections: HashMap, -} - -/// Is the core component of the P2P system that holds the state and delegates actions to the other components -pub struct Manager { - pub(crate) peer_id: PeerId, - pub(crate) identity: Identity, - pub(crate) application_name: String, - pub(crate) stream_id: AtomicU64, - pub(crate) state: RwLock, - pub(crate) discovery_state: Arc>, - event_stream_tx: mpsc::Sender, - event_stream_tx2: mpsc::Sender, -} - -impl fmt::Debug for Manager { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Debug").finish() - } -} - -impl Manager { - /// create a new P2P manager. Please do your best to make the callback closures as fast as possible because they will slow the P2P event loop! 
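The `Event` enum removed above is what the application consumes to keep its UI in sync. A hedged consumer-side sketch follows: a plain mpsc channel stands in for `ManagerStream`, and the local `Event` enum mirrors only a few of the crate's variants with simplified payloads, so this shows the polling contract rather than the real API.

```rust
// Consumer-side sketch: keep pulling events and react until shutdown.
use std::net::SocketAddr;
use tokio::sync::mpsc;

#[derive(Debug)]
enum Event {
    AddListenAddr(SocketAddr),
    RemoveListenAddr(SocketAddr),
    PeerConnected(String), // identity, simplified to a String here
    PeerDisconnected(String),
    Shutdown,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(16);
    tx.send(Event::AddListenAddr("127.0.0.1:4242".parse().unwrap()))
        .await
        .unwrap();
    tx.send(Event::PeerConnected("peer-a".into())).await.unwrap();
    tx.send(Event::Shutdown).await.unwrap();

    // Keep polling until the stream ends or a shutdown event arrives.
    while let Some(event) = rx.recv().await {
        match event {
            Event::Shutdown => break,
            other => println!("update UI for {other:?}"),
        }
    }
}
```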
- pub async fn new( - application_name: &'static str, - keypair: &Keypair, - config: ManagerConfig, - ) -> Result<(Arc, ManagerStream), ManagerError> { - application_name - .chars() - .all(|c| char::is_alphanumeric(c) || c == '-') - .then_some(()) - .ok_or(ManagerError::InvalidAppName)?; - - let peer_id = keypair.peer_id(); - let (event_stream_tx, event_stream_rx) = mpsc::channel(128); - let (event_stream_tx2, event_stream_rx2) = mpsc::channel(128); - - let config2 = config.clone(); - let (discovery_state, service_shutdown_rx) = DiscoveryManagerState::new(); - let this = Arc::new(Self { - application_name: format!("/{application_name}/spacetime/1.0.0"), - identity: keypair.to_identity(), - stream_id: AtomicU64::new(0), - state: RwLock::new(DynamicManagerState { - config, - ipv4_listener_id: None, - ipv4_port: None, - ipv6_listener_id: None, - ipv6_port: None, - connected: Default::default(), - connections: Default::default(), - }), - discovery_state, - peer_id, - event_stream_tx, - event_stream_tx2, - }); - - let mut swarm = ok(ok(SwarmBuilder::with_existing_identity(keypair.inner()) - .with_tokio() - .with_other_transport(|keypair| { - libp2p_quic::GenTransport::::new( - libp2p_quic::Config::new(keypair), - ) - .map(|(p, c), _| (p, StreamMuxerBox::new(c))) - .boxed() - })) - .with_behaviour(|_| SpaceTime::new(this.clone()))) - .build(); - - ManagerStream::refresh_listeners( - &mut swarm, - &mut this.state.write().unwrap_or_else(PoisonError::into_inner), - ); - - Ok(( - this.clone(), - ManagerStream { - discovery_manager: DiscoveryManager::new( - application_name, - this.identity.to_remote_identity(), - this.peer_id, - &config2, - this.discovery_state.clone(), - service_shutdown_rx, - )?, - manager: this, - event_stream_rx, - event_stream_rx2, - swarm, - queued_events: Default::default(), - shutdown: AtomicBool::new(false), - on_establish_streams: HashMap::new(), - }, - )) - } - - pub(crate) async fn emit(&self, event: ManagerStreamAction) { - match self.event_stream_tx.send(event).await { - Ok(()) => {} - Err(err) => warn!("error emitting event: {}", err), - } - } - - pub fn identity(&self) -> RemoteIdentity { - self.identity.to_remote_identity() - } - - pub fn libp2p_peer_id(&self) -> PeerId { - self.peer_id - } - - pub async fn update_config(&self, config: ManagerConfig) { - self.emit(ManagerStreamAction::UpdateConfig(config)).await; - } - - pub async fn get_connected_peers(&self) -> Result, ()> { - let (tx, rx) = oneshot::channel(); - self.emit(ManagerStreamAction::GetConnectedPeers(tx)).await; - rx.await.map_err(|_| { - warn!("failed to get connected peers 3 times, returning error"); - }) - } - - // TODO: Maybe remove this? - pub async fn stream( - &self, - identity: RemoteIdentity, - ) -> Result { - let peer_id = { - let state = self - .discovery_state - .read() - .unwrap_or_else(PoisonError::into_inner); - - // TODO: This should not depend on a `Service` existing. Either we should store discovered peers separatly for this or we should remove this method (prefered). - state - .discovered - .iter() - .find_map(|(_, i)| i.iter().find(|(i, _)| **i == identity)) - .ok_or(UnicastStreamError::PeerIdNotFound)? - .1 - .peer_id - }; - - self.stream_inner(peer_id).await - } - - // TODO: Should this be private now that connections can be done through the `Service`. - // TODO: Does this need any timeouts to be added cause hanging forever is bad? - // be aware this method is `!Sync` so can't be used from rspc. // TODO: Can this limitation be removed? 
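`Manager::new` above starts by validating that the application name is alphanumeric-or-dash, turning the boolean check into a `Result` with `then_some`/`ok_or`. A standalone sketch of that validation step, with a local error type standing in for `ManagerError::InvalidAppName`:

```rust
// Sketch of the application-name validation at the top of `Manager::new`.
#[derive(Debug)]
struct InvalidAppName;

fn validate(application_name: &str) -> Result<(), InvalidAppName> {
    application_name
        .chars()
        .all(|c| char::is_alphanumeric(c) || c == '-')
        .then_some(())
        .ok_or(InvalidAppName)
}

fn main() {
    assert!(validate("spacedrive").is_ok());
    assert!(validate("space drive!").is_err());
    println!("validation behaves as expected");
}
```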
- #[allow(clippy::unused_unit)] // TODO: Remove this clippy override once error handling is added - pub(crate) async fn stream_inner( - &self, - peer_id: PeerId, - ) -> Result { - // TODO: With this system you can send to any random peer id. Can I reduce that by requiring `.connect(peer_id).unwrap().send(data)` or something like that. - let (tx, rx) = oneshot::channel(); - if let Err(err) = self - .event_stream_tx2 - .send(ManagerStreamAction2::StartStream(peer_id, tx)) - .await - { - warn!("error emitting event: {err}"); - }; - let stream = rx.await.map_err(|err| { - warn!("failed to queue establishing stream to peer '{peer_id}'!"); - UnicastStreamError::ErrManagerShutdown(err) - })?; - - stream.build(self, peer_id).await - } - - // TODO: Cleanup return type and this API in general - #[allow(clippy::type_complexity)] - pub fn get_debug_state( - &self, - ) -> ( - PeerId, - RemoteIdentity, - ManagerConfig, - HashMap, - HashSet, - HashMap>>, - HashMap< - String, - HashMap, Vec)>, - >, - HashMap>, - ) { - let state = self.state.read().unwrap_or_else(PoisonError::into_inner); - let discovery_state = self - .discovery_state - .read() - .unwrap_or_else(PoisonError::into_inner); - - ( - self.peer_id, - self.identity.to_remote_identity(), - state.config.clone(), - state.connected.clone(), - state.connections.keys().copied().collect(), - discovery_state - .services - .iter() - .map(|(k, v)| (k.clone(), v.1.clone())) - .collect(), - discovery_state - .discovered - .iter() - .map(|(k, v)| { - ( - k.clone(), - v.clone() - .iter() - .map(|(k, v)| (*k, (v.peer_id, v.meta.clone(), v.addresses.clone()))) - .collect::>(), - ) - }) - .collect(), - discovery_state.known.clone(), - ) - } - - pub fn status(&self) -> P2PStatus { - let state = self.state.read().unwrap_or_else(PoisonError::into_inner); - P2PStatus { - ipv4: match state.ipv4_listener_id.clone() { - Some(Ok(_)) => match state.ipv4_port { - Some(port) => ListenerStatus::Listening { port }, - None => ListenerStatus::Enabling, - }, - Some(Err(error)) => ListenerStatus::Error { error }, - None => ListenerStatus::Disabled, - }, - ipv6: match state.ipv6_listener_id.clone() { - Some(Ok(_)) => match state.ipv6_port { - Some(port) => ListenerStatus::Listening { port }, - None => ListenerStatus::Enabling, - }, - Some(Err(error)) => ListenerStatus::Error { error }, - None => ListenerStatus::Disabled, - }, - } - } - - pub async fn shutdown(&self) { - let (tx, rx) = oneshot::channel(); - if self - .event_stream_tx - .send(ManagerStreamAction::Shutdown(tx)) - .await - .is_ok() - { - rx.await.unwrap_or_else(|_| { - warn!("Error receiving shutdown signal to P2P Manager!"); - }); // Await shutdown so we don't kill the app before the Mdns broadcast - } else { - warn!("p2p was already shutdown, skipping..."); - } - } -} - -#[derive(Error, Debug)] -pub enum ManagerError { - #[error( - "the application name you application provided is invalid. Ensure it is alphanumeric!" - )] - InvalidAppName, - #[error("error with mdns discovery: {0}")] - Mdns(#[from] mdns_sd::Error), - // #[error("todo")] - // Manager(#[from] ManagerError), -} - -/// The configuration for the P2P Manager -/// DO NOT MAKE BREAKING CHANGES - This is embedded in the `node_config.json` -/// For future me: `Keypair` is not on here cause hot reloading it hard. 
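The `ManagerConfig` definition that follows is embedded in the node's on-disk config, so its serde attributes matter: `port` is skipped when `None` and defaulted when missing. A hedged round-trip sketch using `serde`/`serde_json` (the JSON shape is my assumption of how the attributes behave, not a copy of a real `node_config.json`):

```rust
// Sketch of the `ManagerConfig` serialization behaviour implied by its
// `#[serde(default, skip_serializing_if = "Option::is_none")]` attribute.
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Serialize, Deserialize)]
struct ManagerConfig {
    enabled: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    port: Option<u16>,
}

fn main() -> serde_json::Result<()> {
    let config = ManagerConfig { enabled: true, port: None };

    // `port` is omitted entirely when unset.
    let json = serde_json::to_string(&config)?;
    assert_eq!(json, r#"{"enabled":true}"#);

    // An older file without the field still deserializes, falling back to None.
    let parsed: ManagerConfig = serde_json::from_str(r#"{"enabled":false}"#)?;
    println!("{parsed:?}");
    Ok(())
}
```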
-#[derive(Debug, Clone, Serialize, Deserialize, Type)] -pub struct ManagerConfig { - // Enable or disable the P2P layer - pub enabled: bool, - // `None` will chose a random free port on startup - #[serde(default, skip_serializing_if = "Option::is_none")] - pub port: Option, -} - -impl Default for ManagerConfig { - fn default() -> Self { - Self { - enabled: true, - port: None, - } - } -} - -#[derive(Serialize, Debug, Type)] -pub struct P2PStatus { - ipv4: ListenerStatus, - ipv6: ListenerStatus, -} - -#[derive(Serialize, Debug, Type)] -#[serde(tag = "status")] -pub enum ListenerStatus { - Disabled, - Enabling, - Listening { port: u16 }, - Error { error: String }, -} - -fn ok(v: Result) -> T { - match v { - Ok(v) => v, - Err(_) => unreachable!(), - } -} diff --git a/crates/p2p/src/manager_stream.rs b/crates/p2p/src/manager_stream.rs deleted file mode 100644 index 37ad1a9d3..000000000 --- a/crates/p2p/src/manager_stream.rs +++ /dev/null @@ -1,469 +0,0 @@ -use std::{ - collections::{HashMap, HashSet, VecDeque}, - fmt, - net::{Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, PoisonError, - }, -}; - -use libp2p::{ - futures::StreamExt, - swarm::{ - dial_opts::{DialOpts, PeerCondition}, - NotifyHandler, SwarmEvent, ToSwarm, - }, - PeerId, Swarm, -}; -use tokio::sync::{mpsc, oneshot}; -use tracing::{debug, error, info, trace, warn}; - -use crate::{ - quic_multiaddr_to_socketaddr, socketaddr_to_quic_multiaddr, - spacetime::{OutboundRequest, SpaceTime, UnicastStreamBuilder}, - spacetunnel::RemoteIdentity, - DiscoveryManager, DynamicManagerState, Event, Manager, ManagerConfig, Mdns, -}; - -/// TODO -/// -/// This is `Sync` so it can be used from within rspc. -pub enum ManagerStreamAction { - /// TODO - GetConnectedPeers(oneshot::Sender>), - /// Tell the [`libp2p::Swarm`](libp2p::Swarm) to establish a new connection to a peer. - Dial { - peer_id: PeerId, - addresses: Vec, - }, - /// Update the config. This requires the `libp2p::Swarm` - UpdateConfig(ManagerConfig), - /// the node is shutting down. The `ManagerStream` should convert this into `Event::Shutdown` - Shutdown(oneshot::Sender<()>), -} - -/// TODO: Get ride of this and merge into `ManagerStreamAction` without breaking rspc procedures -/// -/// This is `!Sync` so can't be used from within rspc. -pub enum ManagerStreamAction2 { - /// Events are returned to the application via the `ManagerStream::next` method. - Event(Event), - /// Events are returned to the application via the `ManagerStream::next` method. - Events(Vec), - /// TODO - StartStream(PeerId, oneshot::Sender), -} - -impl fmt::Debug for ManagerStreamAction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("ManagerStreamAction") - } -} - -impl fmt::Debug for ManagerStreamAction2 { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("ManagerStreamAction2") - } -} - -impl From for ManagerStreamAction2 { - fn from(event: Event) -> Self { - Self::Event(event) - } -} - -/// TODO -#[must_use = "streams do nothing unless polled"] -pub struct ManagerStream { - pub(crate) manager: Arc, - pub(crate) event_stream_rx: mpsc::Receiver, - pub(crate) event_stream_rx2: mpsc::Receiver, - pub(crate) swarm: Swarm, - pub(crate) discovery_manager: DiscoveryManager, - pub(crate) queued_events: VecDeque, - pub(crate) shutdown: AtomicBool, - pub(crate) on_establish_streams: HashMap>, -} - -impl ManagerStream { - /// Setup the libp2p listeners based on the manager config. 
- /// This method will take care of removing old listeners if needed - pub(crate) fn refresh_listeners(swarm: &mut Swarm, state: &mut DynamicManagerState) { - if state.config.enabled { - let port = state.config.port.unwrap_or(0); - - if state.ipv4_listener_id.is_none() || matches!(state.ipv6_listener_id, Some(Err(_))) { - state.ipv4_listener_id = Some( - swarm - .listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from(( - Ipv4Addr::UNSPECIFIED, - port, - )))) - .map(|id| { - debug!("registered ipv4 listener: {id:?}"); - id - }) - .map_err(|err| { - error!("failed to register ipv4 listener on port {port}: {err}"); - err.to_string() - }), - ); - } - - if state.ipv4_listener_id.is_none() || matches!(state.ipv6_listener_id, Some(Err(_))) { - state.ipv6_listener_id = Some( - swarm - .listen_on(socketaddr_to_quic_multiaddr(&SocketAddr::from(( - Ipv6Addr::UNSPECIFIED, - port, - )))) - .map(|id| { - debug!("registered ipv6 listener: {id:?}"); - id - }) - .map_err(|err| { - error!("failed to register ipv6 listener on port {port}: {err}"); - err.to_string() - }), - ); - } - } else { - if let Some(Ok(listener)) = state.ipv4_listener_id.take() { - debug!("removing ipv4 listener with id '{:?}'", listener); - swarm.remove_listener(listener); - } - - if let Some(Ok(listener)) = state.ipv6_listener_id.take() { - debug!("removing ipv6 listener with id '{:?}'", listener); - swarm.remove_listener(listener); - } - } - } -} - -enum EitherManagerStreamAction { - A(ManagerStreamAction), - B(ManagerStreamAction2), -} - -impl From for EitherManagerStreamAction { - fn from(event: ManagerStreamAction) -> Self { - Self::A(event) - } -} - -impl From for EitherManagerStreamAction { - fn from(event: ManagerStreamAction2) -> Self { - Self::B(event) - } -} - -impl ManagerStream { - pub fn listen_addrs(&self) -> HashSet { - self.discovery_manager.listen_addrs.clone() - } - - // Your application should keep polling this until `None` is received or the P2P system will be halted. - pub async fn next(&mut self) -> Option { - // We loop polling internal services until an event comes in that needs to be sent to the parent application. - loop { - assert!(!self.shutdown.load(Ordering::Relaxed), "`ManagerStream::next` called after shutdown event. This is a mistake in your application code!"); - - if let Some(event) = self.queued_events.pop_front() { - return Some(event); - } - tokio::select! { - () = self.discovery_manager.poll() => { - continue; - }, - event = self.event_stream_rx.recv() => { - // If the sender has shut down we return `None` to also shut down too. - if let Some(event) = self.handle_manager_stream_action(event?.into()).await { - return Some(event); - } - } - event = self.event_stream_rx2.recv() => { - // If the sender has shut down we return `None` to also shut down too. - if let Some(event) = self.handle_manager_stream_action(event?.into()).await { - return Some(event); - } - } - event = self.swarm.select_next_some() => { - match event { - SwarmEvent::Behaviour(event) => { - if let Some(event) = self.handle_manager_stream_action(event.into()).await { - if let Event::Shutdown { .. } = event { - self.shutdown.store(true, Ordering::Relaxed); - } - - return Some(event); - } - }, - SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { - if let Some(streams) = self.on_establish_streams.remove(&peer_id) { - for event in streams { - self.swarm - .behaviour_mut() - .pending_events - .push_back(ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event - }); - } - } - }, - SwarmEvent::ConnectionClosed { peer_id, num_established, .. } => { - if num_established == 0 { - let mut state = self.manager.state.write() - .unwrap_or_else(PoisonError::into_inner); - if state - .connected - .remove(&peer_id).is_none() || state.connections.remove(&peer_id).is_none() { - warn!("unable to remove unconnected client from connected map. This indicates a bug!"); - } - } - }, - SwarmEvent::IncomingConnection { local_addr, .. } => debug!("incoming connection from '{}'", local_addr), - SwarmEvent::IncomingConnectionError { local_addr, error, .. } => warn!("handshake error with incoming connection from '{}': {}", local_addr, error), - SwarmEvent::OutgoingConnectionError { peer_id, error, .. } => warn!("error establishing connection with '{:?}': {}", peer_id, error), - SwarmEvent::NewListenAddr { listener_id, address, .. } => { - let addr = match quic_multiaddr_to_socketaddr(address.clone()) { - Ok(addr) => addr, - Err(err) => { - warn!("error passing listen address '{address:?}': {err:?}"); - continue; - } - }; - - { - let mut state = self.manager.state.write().unwrap_or_else(PoisonError::into_inner); - if let Some(Ok(lid)) = &state.ipv4_listener_id { - if *lid == listener_id { - state.ipv4_port = Some(addr.port()); - } - } - - if let Some(Ok(lid)) = &state.ipv6_listener_id { - if *lid == listener_id { - state.ipv6_port = Some(addr.port()); - } - } - } - - match quic_multiaddr_to_socketaddr(address) { - Ok(addr) => { - trace!("listen address added: {}", addr); - self.discovery_manager.listen_addrs.insert(addr); - self.discovery_manager.do_advertisement(); - return Some(Event::AddListenAddr(addr)); - }, - Err(err) => { - warn!("error passing listen address: {}", err); - continue; - } - } - }, - SwarmEvent::ExpiredListenAddr { address, .. } => { - match quic_multiaddr_to_socketaddr(address) { - Ok(addr) => { - trace!("listen address expired: {}", addr); - self.discovery_manager.listen_addrs.remove(&addr); - self.discovery_manager.do_advertisement(); - return Some(Event::RemoveListenAddr(addr)); - }, - Err(err) => { - warn!("error passing listen address: {}", err); - continue; - } - } - } - SwarmEvent::ListenerClosed { listener_id, addresses, reason } => { - trace!("listener '{:?}' was closed due to: {:?}", listener_id, reason); - for address in addresses { - match quic_multiaddr_to_socketaddr(address) { - Ok(addr) => { - trace!("listen address closed: {}", addr); - self.discovery_manager.listen_addrs.remove(&addr); - self.queued_events.push_back(Event::RemoveListenAddr(addr)); - }, - Err(err) => { - warn!("error passing listen address: {}", err); - continue; - } - } - } - - // The `loop` will restart and begin returning the events from `queued_events`. - } - SwarmEvent::ListenerError { listener_id, error } => warn!("listener '{:?}' reported a non-fatal error: {}", listener_id, error), - SwarmEvent::Dialing { .. 
} => {}, - _ => {} - } - } - } - } - } - - async fn handle_manager_stream_action( - &mut self, - event: EitherManagerStreamAction, - ) -> Option { - match event { - EitherManagerStreamAction::A(event) => match event { - ManagerStreamAction::GetConnectedPeers(response) => { - let result = { - let state = self - .manager - .state - .read() - .unwrap_or_else(PoisonError::into_inner); - - self.swarm - .connected_peers() - .filter_map(|v| { - let v = state.connected.get(v); - - if v.is_none() { - warn!("Error converting PeerId({v:?}) into RemoteIdentity. This is likely a bug in P2P."); - } - - v.copied() - }) - .collect::>() - }; - - response - .send(result) - .map_err(|_| { - error!("Error sending response to `GetConnectedPeers` request! Sending was dropped!"); - }) - .ok(); - } - ManagerStreamAction::Dial { peer_id, addresses } => { - match self.swarm.dial( - DialOpts::peer_id(peer_id) - .condition(PeerCondition::Disconnected) - .addresses(addresses.iter().map(socketaddr_to_quic_multiaddr).collect()) - .build(), - ) { - Ok(()) => {} - Err(err) => warn!( - "error dialing peer '{}' with addresses '{:?}': {}", - peer_id, addresses, err - ), - } - } - ManagerStreamAction::UpdateConfig(config) => { - let mut state = self - .manager - .state - .write() - .unwrap_or_else(PoisonError::into_inner); - - state.config = config; - Self::refresh_listeners(&mut self.swarm, &mut state); - - if !state.config.enabled { - if let Some(mdns) = self.discovery_manager.mdns.take() { - drop(state); - mdns.shutdown(); - } - } else if self.discovery_manager.mdns.is_none() { - match Mdns::new( - self.discovery_manager.application_name, - self.discovery_manager.identity, - self.discovery_manager.peer_id, - ) { - Ok(mdns) => { - self.discovery_manager.mdns = Some(mdns); - self.discovery_manager.do_advertisement(); - } - Err(err) => { - error!("error starting mDNS service: {err:?}"); - self.discovery_manager.mdns = None; - - // state.config.enabled = false; - // TODO: Properly reset the UI state cause it will be outa sync - } - } - } - - // drop(state); - } - ManagerStreamAction::Shutdown(tx) => { - info!("Shutting down P2P Manager..."); - self.discovery_manager.shutdown(); - tx.send(()).unwrap_or_else(|()| { - warn!("Error sending shutdown signal to P2P Manager!"); - }); - - return Some(Event::Shutdown); - } - }, - EitherManagerStreamAction::B(event) => match event { - ManagerStreamAction2::Event(event) => return Some(event), - ManagerStreamAction2::Events(mut events) => { - let first = events.pop(); - - for event in events { - self.queued_events.push_back(event); - } - - return first; - } - ManagerStreamAction2::StartStream(peer_id, tx) => { - if !self.swarm.connected_peers().any(|v| *v == peer_id) { - let Some(addresses) = self - .discovery_manager - .state - .read() - .unwrap_or_else(PoisonError::into_inner) - .discovered - .iter() - .find_map(|(_, service)| { - service.iter().find_map(|(_, v)| { - (v.peer_id == peer_id).then(|| v.addresses.clone()) - }) - }) - else { - warn!("Peer '{}' is not connected and no addresses are known for it! 
Skipping connection creation...", peer_id); - return None; - }; - - match self.swarm.dial( - DialOpts::peer_id(peer_id) - .condition(PeerCondition::Disconnected) - .addresses( - addresses.iter().map(socketaddr_to_quic_multiaddr).collect(), - ) - .build(), - ) { - Ok(()) => {} - Err(err) => warn!( - "error dialing peer '{}' with addresses '{:?}': {}", - peer_id, addresses, err - ), - } - - self.on_establish_streams - .entry(peer_id) - .or_default() - .push(OutboundRequest::Unicast(tx)); - } else { - self.swarm.behaviour_mut().pending_events.push_back( - ToSwarm::NotifyHandler { - peer_id, - handler: NotifyHandler::Any, - event: OutboundRequest::Unicast(tx), - }, - ); - } - } - }, - } - - None - } -} diff --git a/crates/p2p/src/peer.rs b/crates/p2p/src/peer.rs deleted file mode 100644 index 60a2e5e23..000000000 --- a/crates/p2p/src/peer.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::{ - fmt::{self, Formatter}, - net::SocketAddr, -}; - -use libp2p::PeerId; - -use crate::{spacetunnel::RemoteIdentity, Metadata}; - -/// Represents a discovered peer. -/// This is held by [Manager] to keep track of discovered peers -#[derive(Clone)] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub struct DiscoveredPeer { - /// the public key of the discovered peer - pub identity: RemoteIdentity, - /// the libp2p peer id of the discovered peer - #[serde(skip)] - pub peer_id: PeerId, - /// get the metadata of the discovered peer - pub metadata: TMeta, - /// get the addresses of the discovered peer - pub addresses: Vec, -} - -// `Manager` impls `Debug` but it causes infinite loop and stack overflow, lmao. -impl fmt::Debug for DiscoveredPeer { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("DiscoveredPeer") - .field("peer_id", &self.peer_id) - .field("metadata", &self.metadata) - .field("addresses", &self.addresses) - .finish() - } -} - -/// Represents a connected peer. -/// This is held by [Manager] to keep track of connected peers -#[derive(Debug, Clone)] -#[cfg_attr(feature = "serde", derive(serde::Serialize))] -#[cfg_attr(feature = "specta", derive(specta::Type))] -pub struct ConnectedPeer { - /// get the identity of the discovered peer - pub identity: RemoteIdentity, - /// Did I open the connection? - pub establisher: bool, -} diff --git a/crates/p2p/src/spacetime/behaviour.rs b/crates/p2p/src/spacetime/behaviour.rs deleted file mode 100644 index ffa636454..000000000 --- a/crates/p2p/src/spacetime/behaviour.rs +++ /dev/null @@ -1,185 +0,0 @@ -use std::{ - collections::VecDeque, - sync::{Arc, PoisonError}, - task::{Context, Poll}, -}; - -use libp2p::{ - core::{ConnectedPoint, Endpoint}, - swarm::{ - derive_prelude::{ConnectionEstablished, ConnectionId, FromSwarm}, - ConnectionClosed, ConnectionDenied, NetworkBehaviour, THandler, THandlerInEvent, - THandlerOutEvent, ToSwarm, - }, - Multiaddr, -}; -use thiserror::Error; -use tracing::{debug, trace, warn}; - -use crate::{Event, Manager, ManagerStreamAction2}; - -use super::SpaceTimeConnection; - -/// Internal threshold for when to shrink the capacity -/// of empty queues. If the capacity of an empty queue -/// exceeds this threshold, the associated memory is -/// released. -pub const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; - -// TODO: Remove this? -#[derive(Debug, Error)] -pub enum OutboundFailure {} - -/// `SpaceTime` is a [`NetworkBehaviour`](libp2p_swarm::NetworkBehaviour) that implements the `SpaceTime` protocol. 
-/// This protocol sits under the application to abstract many complexities of 2 way connections and deals with authentication, chucking, etc. -pub struct SpaceTime { - pub(crate) manager: Arc, - pub(crate) pending_events: - VecDeque::ToSwarm, THandlerInEvent>>, -} - -impl SpaceTime { - /// intialise the fabric of space time - pub fn new(manager: Arc) -> Self { - Self { - manager, - pending_events: VecDeque::new(), - } - } -} - -impl NetworkBehaviour for SpaceTime { - type ConnectionHandler = SpaceTimeConnection; - type ToSwarm = ManagerStreamAction2; - - fn handle_established_inbound_connection( - &mut self, - _connection_id: ConnectionId, - peer_id: libp2p::PeerId, - _local_addr: &Multiaddr, - _remote_addr: &Multiaddr, - ) -> Result, ConnectionDenied> { - Ok(SpaceTimeConnection::new(peer_id, self.manager.clone())) - } - - fn handle_pending_outbound_connection( - &mut self, - _connection_id: ConnectionId, - _maybe_peer: Option, - _addresses: &[Multiaddr], - _effective_role: Endpoint, - ) -> Result, ConnectionDenied> { - // This should be unused but libp2p still calls it - Ok(vec![]) - } - - fn handle_established_outbound_connection( - &mut self, - _connection_id: ConnectionId, - peer_id: libp2p::PeerId, - _addr: &Multiaddr, - _role_override: Endpoint, - ) -> Result, ConnectionDenied> { - Ok(SpaceTimeConnection::new(peer_id, self.manager.clone())) - } - - fn on_swarm_event(&mut self, event: FromSwarm) { - match event { - FromSwarm::ConnectionEstablished(ConnectionEstablished { - peer_id, - endpoint, - other_established, - .. - }) => { - let address = match endpoint { - ConnectedPoint::Dialer { address, .. } => Some(address.clone()), - ConnectedPoint::Listener { .. } => None, - }; - trace!( - "connection establishing with peer '{}' found at '{:?}'; peer has {} active connections", - peer_id, address, other_established - ); - self.manager - .state - .write() - .unwrap_or_else(PoisonError::into_inner) - .connections - .insert(peer_id, (endpoint.clone(), other_established)); - } - FromSwarm::ConnectionClosed(ConnectionClosed { - peer_id, - remaining_established, - .. - }) => { - if remaining_established == 0 { - debug!("Disconnected from peer '{}'", peer_id); - let mut state = self - .manager - .state - .write() - .unwrap_or_else(PoisonError::into_inner); - - state.connections.remove(&peer_id); - if let Some(remote_identity) = state.connected.remove(&peer_id) { - self.pending_events.push_back(ToSwarm::GenerateEvent( - Event::PeerDisconnected(remote_identity).into(), - )); - } else { - warn!("Disconnected peer '{peer_id}' but was not connected. This likely indicates a bug!"); - } - } - } - FromSwarm::AddressChange(event) => { - debug!( - "Address change event: {:?} {:?} {:?} {:?}", - event.peer_id, event.connection_id, event.old, event.new - ); - } - FromSwarm::DialFailure(event) => { - if let Some(peer_id) = event.peer_id { - debug!("Dialing failure to peer '{}': {:?}", peer_id, event.error); - - // TODO - // If there are pending outgoing requests when a dial failure occurs, - // it is implied that we are not connected to the peer, since pending - // outgoing requests are drained when a connection is established and - // only created when a peer is not connected when a request is made. - // Thus these requests must be considered failed, even if there is - // another, concurrent dialing attempt ongoing. 
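Both `SpaceTime::poll` and the connection handler below follow the same event-queue discipline around `EMPTY_QUEUE_SHRINK_THRESHOLD`: pop a queued event if one exists, otherwise shrink an empty queue whose capacity has ballooned. A small std-only sketch of that discipline, detached from the libp2p trait machinery:

```rust
// Sketch of the "pop a queued event, or shrink an oversized empty queue"
// pattern shared by the poll implementations in these files.
use std::collections::VecDeque;
use std::task::Poll;

const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100;

fn poll_queue<T>(pending_events: &mut VecDeque<T>) -> Poll<T> {
    if let Some(ev) = pending_events.pop_front() {
        return Poll::Ready(ev);
    } else if pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD {
        // Release memory held by a queue that was once large but is now idle.
        pending_events.shrink_to_fit();
    }
    Poll::Pending
}

fn main() {
    let mut queue: VecDeque<&str> = VecDeque::with_capacity(512);
    queue.push_back("connection established");
    assert!(poll_queue(&mut queue).is_ready());
    assert!(poll_queue(&mut queue).is_pending());
    println!("capacity after drain: {}", queue.capacity());
}
```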
- // if let Some(pending) = self.pending_outbound_requests.remove(&peer_id) { - // for request in pending { - // self.pending_events - // .push_back(NetworkBehaviourAction::GenerateEvent( - // Event::OutboundFailure { - // peer_id, - // request_id: request.request_id, - // error: OutboundFailure::DialFailure, - // }, - // )); - // } - // } - } - } - _ => {} - } - } - - fn on_connection_handler_event( - &mut self, - _peer_id: libp2p::PeerId, - _connection: ConnectionId, - event: THandlerOutEvent, - ) { - self.pending_events.push_back(ToSwarm::GenerateEvent(event)); - } - - fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { - if let Some(ev) = self.pending_events.pop_front() { - return Poll::Ready(ev); - } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { - self.pending_events.shrink_to_fit(); - } - - Poll::Pending - } -} diff --git a/crates/p2p/src/spacetime/connection.rs b/crates/p2p/src/spacetime/connection.rs deleted file mode 100644 index 7afa0861c..000000000 --- a/crates/p2p/src/spacetime/connection.rs +++ /dev/null @@ -1,140 +0,0 @@ -use libp2p::{ - swarm::{ - handler::{ - ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, FullyNegotiatedInbound, - }, - SubstreamProtocol, - }, - PeerId, -}; -use std::{ - collections::VecDeque, - sync::Arc, - task::{Context, Poll}, - time::Duration, -}; -use tracing::error; - -use crate::{Manager, ManagerStreamAction2}; - -use super::{InboundProtocol, OutboundProtocol, OutboundRequest, EMPTY_QUEUE_SHRINK_THRESHOLD}; - -// TODO: Probs change this based on the ConnectionEstablishmentPayload -const SUBSTREAM_TIMEOUT: Duration = Duration::from_secs(10); // TODO: Tune value - -#[allow(clippy::type_complexity)] -pub struct SpaceTimeConnection { - peer_id: PeerId, - manager: Arc, - pending_events: VecDeque< - ConnectionHandlerEvent< - OutboundProtocol, - ::OutboundOpenInfo, - ::ToBehaviour, - // StreamUpgradeError, - >, - >, -} - -impl SpaceTimeConnection { - pub(super) fn new(peer_id: PeerId, manager: Arc) -> Self { - Self { - peer_id, - manager, - pending_events: VecDeque::new(), - } - } -} - -impl ConnectionHandler for SpaceTimeConnection { - type FromBehaviour = OutboundRequest; - type ToBehaviour = ManagerStreamAction2; - type InboundProtocol = InboundProtocol; - type OutboundProtocol = OutboundProtocol; - type OutboundOpenInfo = (); - type InboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new( - InboundProtocol { - peer_id: self.peer_id, - manager: self.manager.clone(), - }, - (), - ) - .with_timeout(SUBSTREAM_TIMEOUT) - } - - fn on_behaviour_event(&mut self, req: Self::FromBehaviour) { - // TODO: Working keep alives - // self.keep_alive = KeepAlive::Yes; - // self.outbound.push_back(request); - - self.pending_events - .push_back(ConnectionHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new( - OutboundProtocol { - application_name: self.manager.application_name.clone(), - req, - identity: self.manager.identity.clone(), - }, - (), - ) // TODO: Use `info` here maybe to pass into about the client. Idk? 
- .with_timeout(SUBSTREAM_TIMEOUT), - }); - } - - fn connection_keep_alive(&self) -> bool { - true // TODO: Make this work how the old one did with storing it on `self` and updating on events - } - - fn poll( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll< - ConnectionHandlerEvent, - > { - if let Some(event) = self.pending_events.pop_front() { - return Poll::Ready(event); - } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { - self.pending_events.shrink_to_fit(); - } - - Poll::Pending - } - - // TODO: Which level we doing error handler?. On swarm, on Behavior or here??? - fn on_connection_event( - &mut self, - event: ConnectionEvent< - Self::InboundProtocol, - Self::OutboundProtocol, - Self::InboundOpenInfo, - Self::OutboundOpenInfo, - >, - ) { - match event { - ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound { - protocol, .. - }) => { - self.pending_events - .push_back(ConnectionHandlerEvent::NotifyBehaviour(protocol)); - } - ConnectionEvent::FullyNegotiatedOutbound(_) => {} - ConnectionEvent::DialUpgradeError(event) => { - error!("DialUpgradeError: {:#?}", event.error); - } - ConnectionEvent::ListenUpgradeError(event) => { - error!("DialUpgradeError: {:#?}", event.error); - - // TODO: If `event.error` close connection cause we don't "speak the same language"! - } - ConnectionEvent::AddressChange(_) => { - // TODO: Should we be telling `SpaceTime` to update it's info here or is it also getting this event? - } - ConnectionEvent::LocalProtocolsChange(_) => {} - ConnectionEvent::RemoteProtocolsChange(_) => {} - _ => {} - } - } -} diff --git a/crates/p2p/src/spacetime/libp2p.rs b/crates/p2p/src/spacetime/libp2p.rs deleted file mode 100644 index 9d613df19..000000000 --- a/crates/p2p/src/spacetime/libp2p.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! This file contains of stuff to make libp2p work for us. They are fairly meaningless. - -#[derive(Clone)] -pub struct SpaceTimeProtocolName(pub String); - -impl AsRef for SpaceTimeProtocolName { - fn as_ref(&self) -> &str { - &self.0 - } -} diff --git a/crates/p2p/src/spacetime/mod.rs b/crates/p2p/src/spacetime/mod.rs deleted file mode 100644 index 6a1742086..000000000 --- a/crates/p2p/src/spacetime/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! `Spacetime` is just a fancy name for the protocol which sits between libp2p and the application built on this library. -//! This protocol sits under the application to abstract many complexities of 2 way connections and deals with authentication, chucking, etc. 
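The `SpaceTimeProtocolName` newtype just above exists so libp2p's upgrade machinery can read the protocol string via `AsRef<str>`, and the manager builds that string as `/{application_name}/spacetime/1.0.0`. A minimal sketch tying the two together; the example app name is made up:

```rust
// Sketch of the protocol-name plumbing: a versioned protocol string wrapped in
// a newtype that exposes it through `AsRef<str>`.
#[derive(Clone)]
struct SpaceTimeProtocolName(String);

impl AsRef<str> for SpaceTimeProtocolName {
    fn as_ref(&self) -> &str {
        &self.0
    }
}

fn main() {
    let application_name = "spacedrive";
    let protocol = SpaceTimeProtocolName(format!("/{application_name}/spacetime/1.0.0"));
    assert_eq!(protocol.as_ref(), "/spacedrive/spacetime/1.0.0");
}
```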
- -mod behaviour; -mod connection; -mod libp2p; -mod proto_inbound; -mod proto_outbound; -mod stream; - -pub use self::libp2p::*; -pub use behaviour::*; -pub use connection::*; -pub use proto_inbound::*; -pub use proto_outbound::*; -pub use stream::*; diff --git a/crates/p2p/src/spacetime/proto_inbound.rs b/crates/p2p/src/spacetime/proto_inbound.rs deleted file mode 100644 index a5fafcff4..000000000 --- a/crates/p2p/src/spacetime/proto_inbound.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::{ - future::Future, - pin::Pin, - sync::{atomic::Ordering, Arc, PoisonError}, -}; - -use libp2p::{ - core::{ConnectedPoint, UpgradeInfo}, - InboundUpgrade, PeerId, Stream, -}; -use tokio_util::compat::FuturesAsyncReadCompatExt; -use tracing::{debug, warn}; - -use crate::{ - spacetime::UnicastStream, ConnectedPeer, Event, Manager, ManagerStreamAction2, PeerMessageEvent, -}; - -use super::SpaceTimeProtocolName; - -pub struct InboundProtocol { - pub(crate) peer_id: PeerId, - pub(crate) manager: Arc, -} - -impl UpgradeInfo for InboundProtocol { - type Info = SpaceTimeProtocolName; - type InfoIter = [Self::Info; 1]; - - fn protocol_info(&self) -> Self::InfoIter { - [SpaceTimeProtocolName(self.manager.application_name.clone())] - } -} - -impl InboundUpgrade for InboundProtocol { - type Output = ManagerStreamAction2; - type Error = (); - type Future = Pin> + Send + 'static>>; - - fn upgrade_inbound(self, io: Stream, _: Self::Info) -> Self::Future { - let id = self.manager.stream_id.fetch_add(1, Ordering::Relaxed); - Box::pin(async move { - debug!( - "stream({}, {id}): accepting inbound connection", - self.peer_id - ); - - let io = io.compat(); - debug!("stream({}, {id}): unicast stream accepted", self.peer_id); - - let stream = match UnicastStream::new_inbound(self.manager.identity.clone(), io).await { - Ok(v) => v, - Err(err) => { - warn!( - "Failed to construct 'UnicastStream' with Peer('{}'): {err:?}", - self.peer_id - ); - return Err(()); - } - }; - - let establisher = { - let mut state = self - .manager - .state - .write() - .unwrap_or_else(PoisonError::into_inner); - - state - .connected - .insert(self.peer_id, stream.remote_identity()); - - match state.connections.get(&self.peer_id) { - Some((endpoint, 0)) => Some(match endpoint { - ConnectedPoint::Dialer { .. } => true, - ConnectedPoint::Listener { .. } => false, - }), - None => { - warn!("Error getting PeerId({})'s connection state. 
This indicates a bug in P2P", self.peer_id); - None - } - _ => None, - } - }; - - debug!( - "sending establishment request to peer '{}'", - stream.remote_identity() - ); - - let identity = stream.remote_identity(); - let mut events = vec![PeerMessageEvent { - stream_id: id, - identity, - manager: self.manager.clone(), - stream, - _priv: (), - } - .into()]; - - if let Some(establisher) = establisher { - events.push(Event::PeerConnected(ConnectedPeer { - identity, - establisher, - })); - } - - Ok(ManagerStreamAction2::Events(events)) - }) - } -} diff --git a/crates/p2p/src/spacetime/proto_outbound.rs b/crates/p2p/src/spacetime/proto_outbound.rs deleted file mode 100644 index 45e5d4af5..000000000 --- a/crates/p2p/src/spacetime/proto_outbound.rs +++ /dev/null @@ -1,54 +0,0 @@ -use std::future::{ready, Ready}; - -use libp2p::{core::UpgradeInfo, OutboundUpgrade, Stream}; -use tokio::sync::oneshot; -use tokio_util::compat::FuturesAsyncReadCompatExt; -use tracing::warn; - -use crate::spacetunnel::Identity; - -use super::{SpaceTimeProtocolName, UnicastStreamBuilder}; - -#[derive(Debug)] -pub enum OutboundRequest { - Unicast(oneshot::Sender), -} - -pub struct OutboundProtocol { - pub(crate) application_name: String, - pub(crate) req: OutboundRequest, - pub(crate) identity: Identity, -} - -impl UpgradeInfo for OutboundProtocol { - type Info = SpaceTimeProtocolName; - type InfoIter = [Self::Info; 1]; - - fn protocol_info(&self) -> Self::InfoIter { - [SpaceTimeProtocolName(self.application_name.clone())] - } -} - -impl OutboundUpgrade for OutboundProtocol { - type Output = (); - type Error = (); - type Future = Ready>; - - fn upgrade_outbound(self, io: Stream, _protocol: Self::Info) -> Self::Future { - let result = match self.req { - OutboundRequest::Unicast(sender) => { - // We write the discriminator to the stream in the `Manager::stream` method before returning the stream to the user to make async a tad nicer. - sender - .send(UnicastStreamBuilder::new( - self.identity.clone(), - io.compat(), - )) - .map_err(|err| { - warn!("error transmitting unicast stream: {err:?}"); - }) - } - }; - - ready(result) - } -} diff --git a/crates/p2p/src/spacetime/stream.rs b/crates/p2p/src/spacetime/stream.rs deleted file mode 100644 index e91225b9e..000000000 --- a/crates/p2p/src/spacetime/stream.rs +++ /dev/null @@ -1,200 +0,0 @@ -use std::{ - io::{self}, - pin::Pin, - sync::PoisonError, - task::{Context, Poll}, -}; - -use libp2p::{futures::AsyncWriteExt, PeerId, Stream}; -use thiserror::Error; -use tokio::{ - io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt as TokioAsyncWriteExt, ReadBuf}, - sync::oneshot, - time::{timeout, Duration}, -}; -use tokio_util::compat::Compat; - -use crate::{ - spacetunnel::{Identity, IdentityErr, RemoteIdentity, REMOTE_IDENTITY_LEN}, - Manager, -}; - -pub const CHALLENGE_LENGTH: usize = 32; -const ONE_MINUTE: Duration = Duration::from_secs(60); - -/// A unicast stream is a direct stream to a specific peer. -#[derive(Debug)] -#[allow(unused)] // TODO: Remove this lint override -pub struct UnicastStream { - io: Compat, - me: Identity, - remote: RemoteIdentity, -} - -// TODO: Utils for sending msgpack and stuff over the stream. -> Have a max size of reading buffers so we are less susceptible to DoS attacks. 
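The `UnicastStream` constructors below exchange remote identities over the substream, wrapping each read and write in a one-minute timeout. A hedged sketch of that timeout-wrapped exchange: a `tokio::io::duplex` pipe stands in for the libp2p substream, the 32-byte "identity" is invented for the example, and both sides simply write-then-read here even though the real inbound and outbound paths order those steps differently.

```rust
// Sketch of a timeout-guarded identity exchange over an in-memory pipe.
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::time::{timeout, Duration};

const IDENTITY_LEN: usize = 32;
const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(60);

async fn exchange(
    mut io: impl AsyncReadExt + AsyncWriteExt + Unpin,
    me: [u8; IDENTITY_LEN],
) -> std::io::Result<[u8; IDENTITY_LEN]> {
    // Send our identity, but give up if the peer never drains the stream.
    timeout(HANDSHAKE_TIMEOUT, io.write_all(&me))
        .await
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "handshake timed out"))??;

    // Read exactly one identity back, again bounded by the timeout.
    let mut remote = [0u8; IDENTITY_LEN];
    timeout(HANDSHAKE_TIMEOUT, io.read_exact(&mut remote))
        .await
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "handshake timed out"))??;
    Ok(remote)
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let (a, b) = tokio::io::duplex(64);
    let (ra, rb) = tokio::join!(exchange(a, [1u8; 32]), exchange(b, [2u8; 32]));
    assert_eq!(ra?, [2u8; 32]);
    assert_eq!(rb?, [1u8; 32]);
    println!("identities exchanged");
    Ok(())
}
```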
- -impl UnicastStream { - pub(crate) async fn new_inbound( - identity: Identity, - mut io: Compat, - ) -> Result { - // TODO: Finish this - // let mut challenge = [0u8; CHALLENGE_LENGTH]; - // io.read_exact(&mut challenge).await.unwrap(); // TODO: Timeout - // let nonce = ChaCha20Poly1305::generate_nonce(&mut OsRng); // 96-bits; unique per message - // let ciphertext = cipher.encrypt(&nonce, b"plaintext message".as_ref())?; - // let plaintext = cipher.decrypt(&nonce, ciphertext.as_ref())?; - - // TODO: THIS IS INSECURE!!!!! - // We are just sending strings of the public key without any verification the other party holds the private key. - let mut actual = [0; REMOTE_IDENTITY_LEN]; - match timeout(ONE_MINUTE, io.read_exact(&mut actual)).await { - Ok(r) => r?, - Err(_) => return Err(UnicastStreamError::Timeout), - }; - let remote = RemoteIdentity::from_bytes(&actual)?; - - match timeout( - ONE_MINUTE, - io.write_all(&identity.to_remote_identity().get_bytes()), - ) - .await - { - Ok(w) => w?, - Err(_) => return Err(UnicastStreamError::Timeout), - }; - - // TODO: Do we have something to compare against? I don't think so this is fine. - // if expected.get_bytes() != actual { - // panic!("Mismatch in remote identity!"); - // } - - Ok(Self { - io, - me: identity, - remote, - }) - } - - pub(crate) async fn new_outbound( - identity: Identity, - mut io: Compat, - ) -> Result { - // TODO: Use SPAKE not some handrolled insecure mess - // let challenge = rand::thread_rng().gen::<[u8; CHALLENGE_LENGTH]>(); - // self.0.write_all(&challenge).await?; - - // TODO: THIS IS INSECURE!!!!! - // We are just sending strings of the public key without any verification the other party holds the private key. - match timeout( - ONE_MINUTE, - io.write_all(&identity.to_remote_identity().get_bytes()), - ) - .await - { - Ok(w) => w?, - Err(_) => return Err(UnicastStreamError::Timeout), - }; - - let mut actual = [0; REMOTE_IDENTITY_LEN]; - match timeout(ONE_MINUTE, io.read_exact(&mut actual)).await { - Ok(r) => r?, - Err(_) => return Err(UnicastStreamError::Timeout), - }; - let remote = RemoteIdentity::from_bytes(&actual)?; - - // TODO: Do we have something to compare against? I don't think so this is fine. 
- // if expected.get_bytes() != actual { - // panic!("Mismatch in remote identity!"); - // } - - Ok(Self { - io, - me: identity, - remote, - }) - } - - #[must_use] - pub fn remote_identity(&self) -> RemoteIdentity { - self.remote - } - - pub async fn close(self) -> Result<(), io::Error> { - self.io.into_inner().close().await - } -} - -impl AsyncRead for UnicastStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut ReadBuf<'_>, - ) -> Poll> { - Pin::new(&mut self.get_mut().io).poll_read(cx, buf) - } -} - -impl AsyncWrite for UnicastStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.get_mut().io).poll_write(cx, buf) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().io).poll_flush(cx) - } - - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.get_mut().io).poll_shutdown(cx) - } -} - -#[derive(Debug, Error)] -pub enum UnicastStreamError { - #[error("io error: {0}")] - IoError(#[from] io::Error), - #[error("identity error: {0}")] - InvalidError(#[from] IdentityErr), - // TODO: Technically this error is from the manager - #[error("peer id not found")] - PeerIdNotFound, - #[error("error manager shutdown")] - ErrManagerShutdown(#[from] oneshot::error::RecvError), - #[error("error getting peer id for '{0}'")] - ErrPeerIdNotFound(RemoteIdentity), - #[error("timeout")] - Timeout, -} - -#[derive(Debug)] -pub struct UnicastStreamBuilder { - identity: Identity, - io: Compat, -} - -impl UnicastStreamBuilder { - pub(crate) fn new(identity: Identity, io: Compat) -> Self { - Self { identity, io } - } - - pub(crate) async fn build( - self, - manager: &Manager, - peer_id: PeerId, - ) -> Result { - let stream = UnicastStream::new_outbound(self.identity, self.io).await?; - - manager - .state - .write() - .unwrap_or_else(PoisonError::into_inner) - .connected - .insert(peer_id, stream.remote_identity()); - - Ok(stream) - } -} diff --git a/crates/p2p/src/utils/keypair.rs b/crates/p2p/src/utils/keypair.rs deleted file mode 100644 index 35a1a0a3f..000000000 --- a/crates/p2p/src/utils/keypair.rs +++ /dev/null @@ -1,63 +0,0 @@ -use ed25519_dalek::SigningKey; -use libp2p::identity::ed25519::{self}; -use serde::{Deserialize, Serialize}; - -use crate::spacetunnel::{Identity, RemoteIdentity}; - -#[derive(Debug, Clone)] -pub struct Keypair(ed25519::Keypair); - -impl Keypair { - #[must_use] - pub fn generate() -> Self { - Self(ed25519::Keypair::generate()) - } - - #[must_use] - pub fn to_identity(&self) -> Identity { - // This depends on libp2p implementation details which isn't great - SigningKey::from_keypair_bytes(&self.0.to_bytes()) - .expect("Failed to convert 'ed25519::Keypair' into 'SigningKey'. 
They should have an identical representation.") - .into() - } - - #[must_use] - pub fn to_remote_identity(&self) -> RemoteIdentity { - self.to_identity().to_remote_identity() - } - - // TODO: Make this `pub(crate)` - #[must_use] - pub fn peer_id(&self) -> libp2p::PeerId { - let pk: libp2p::identity::PublicKey = self.0.public().into(); - - libp2p::PeerId::from_public_key(&pk) - } - - #[must_use] - pub fn inner(&self) -> libp2p::identity::Keypair { - self.0.clone().into() - } -} - -impl Serialize for Keypair { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_bytes(&self.0.to_bytes()) - } -} - -impl<'de> Deserialize<'de> for Keypair { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let mut bytes = Vec::::deserialize(deserializer)?; - Ok(Self( - ed25519::Keypair::try_from_bytes(bytes.as_mut_slice()) - .map_err(serde::de::Error::custom)?, - )) - } -} diff --git a/crates/p2p/src/utils/metadata.rs b/crates/p2p/src/utils/metadata.rs deleted file mode 100644 index 0c3e681b1..000000000 --- a/crates/p2p/src/utils/metadata.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::{collections::HashMap, fmt::Debug}; - -/// this trait must be implemented for the metadata type to allow it to be converted to MDNS DNS records. -pub trait Metadata: Debug + Clone + Send + Sync + 'static { - fn to_hashmap(self) -> HashMap; - - fn from_hashmap(data: &HashMap) -> Result - where - Self: Sized; -} diff --git a/crates/p2p/src/utils/mod.rs b/crates/p2p/src/utils/mod.rs deleted file mode 100644 index ad5998fcf..000000000 --- a/crates/p2p/src/utils/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod keypair; -mod metadata; -mod multiaddr; - -pub use keypair::*; -pub use metadata::*; -pub use multiaddr::*; diff --git a/crates/p2p/src/utils/multiaddr.rs b/crates/p2p/src/utils/multiaddr.rs deleted file mode 100644 index 6f2da3194..000000000 --- a/crates/p2p/src/utils/multiaddr.rs +++ /dev/null @@ -1,44 +0,0 @@ -use std::net::{IpAddr, SocketAddr}; - -use libp2p::{multiaddr::Protocol, Multiaddr}; - -// TODO: Turn these into From/Into impls on a wrapper type - -pub fn quic_multiaddr_to_socketaddr(m: Multiaddr) -> Result { - let mut addr_parts = m.iter(); - - let addr = match addr_parts.next() { - Some(Protocol::Ip4(addr)) => IpAddr::V4(addr), - Some(Protocol::Ip6(addr)) => IpAddr::V6(addr), - Some(proto) => { - return Err(format!( - "Invalid multiaddr. Segment 1 found protocol 'Ip4' or 'Ip6' but found '{proto}'" - )) - } - None => return Err("Invalid multiaddr. Segment 1 missing".to_string()), - }; - - let port = match addr_parts.next() { - Some(Protocol::Udp(port)) => port, - Some(proto) => { - return Err(format!( - "Invalid multiaddr. Segment 2 expected protocol 'Udp' but found '{proto}'" - )) - } - None => return Err("Invalid multiaddr. 
Segment 2 missing".to_string()), - }; - - Ok(SocketAddr::new(addr, port)) -} - -#[must_use] -pub fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr { - let mut addr = Multiaddr::empty(); - match m { - SocketAddr::V4(ip) => addr.push(Protocol::Ip4(*ip.ip())), - SocketAddr::V6(ip) => addr.push(Protocol::Ip6(*ip.ip())), - } - addr.push(Protocol::Udp(m.port())); - addr.push(Protocol::QuicV1); - addr -} diff --git a/crates/p2p/Cargo.toml b/crates/p2p2/Cargo.toml similarity index 79% rename from crates/p2p/Cargo.toml rename to crates/p2p2/Cargo.toml index 884d8ee0f..e4e96904b 100644 --- a/crates/p2p/Cargo.toml +++ b/crates/p2p2/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "sd-p2p" -version = "0.1.0" +name = "sd-p2p2" +version = "0.2.0" description = "Rust Peer to Peer Networking Library" authors = ["Oscar Beaumont "] readme = "README.md" @@ -8,17 +8,16 @@ license = { workspace = true } repository = { workspace = true } edition = { workspace = true } +# TODO: Remove features??? and dependencies + [features] default = [] -serde = [] specta = [] [dependencies] base64 = { workspace = true } pin-project-lite = { workspace = true } -serde = { workspace = true, features = [ - "derive", -] } # TODO: Optional or remove feature +serde = { workspace = true, features = ["derive"] } specta = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = [ @@ -41,14 +40,18 @@ if-watch = { version = "=3.2.0", features = [ ] } # Override the features of if-watch which is used by libp2p-quic libp2p = { version = "0.53.2", features = ["tokio", "serde"] } libp2p-quic = { version = "0.10.2", features = ["tokio"] } +libp2p-stream = "0.1.0-alpha" mdns-sd = "0.10.3" rand_core = { version = "0.6.4" } streamunordered = "0.5.3" zeroize = { version = "1.7.0", features = ["derive"] } base91 = "0.1.0" sha256 = "1.5.0" +stable-vec = "0.4.0" +hash_map_diff = "0.2.0" +sync_wrapper = "0.1.2" [dev-dependencies] -tokio = { workspace = true, features = ["rt-multi-thread"] } -tracing-subscriber = { workspace = true, features = ["env-filter"] } +tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } +tracing-subscriber = { version = "0.3.18" } uuid = { workspace = true, features = ["v4"] } diff --git a/crates/p2p/README.md b/crates/p2p2/README.md similarity index 100% rename from crates/p2p/README.md rename to crates/p2p2/README.md diff --git a/crates/p2p2/src/hooks.rs b/crates/p2p2/src/hooks.rs new file mode 100644 index 000000000..0126652e5 --- /dev/null +++ b/crates/p2p2/src/hooks.rs @@ -0,0 +1,130 @@ +use std::{collections::HashSet, fmt, net::SocketAddr, sync::Arc}; + +use flume::Sender; +use tokio::sync::oneshot; + +use crate::{Peer, RemoteIdentity}; + +#[derive(Debug, Clone)] +pub enum HookEvent { + /// `P2P::service` has changed + MetadataModified, + + /// A new listener was registered with the P2P system. + ListenerRegistered(ListenerId), + /// A listener's address was added. + ListenerAddrAdded(ListenerId, SocketAddr), + /// A listener's address was removed. + ListenerAddrRemoved(ListenerId, SocketAddr), + /// A listener was unregistered from the P2P system. + ListenerUnregistered(ListenerId), + + /// A peer was inserted into `P2P::peers` + /// This peer could have connected to or have been discovered by a hook. + PeerAvailable(Arc), + /// A peer was removed from `P2P::peers` + /// This is due to it no longer being discovered, containing no active connections or available connection methods. 
+ PeerUnavailable(RemoteIdentity), + + /// A peer was discovered by a hook + /// This will fire for *every peer* per every *hook* that discovers it. + PeerDiscoveredBy(HookId, Arc), + /// A hook expired a peer + /// This will fire for *every peer* per every *hook* that discovers it. + PeerExpiredBy(HookId, RemoteIdentity), + + /// "Connections" are an internal concept to the P2P library but they will be automatically triggered by `Peer::new_stream`. + /// They are a concept users of the application may care about so they are exposed here. + + /// A new listener established a connection with a peer + PeerConnectedWith(ListenerId, Arc), + /// A connection closed with a peer. + PeerDisconnectedWith(ListenerId, RemoteIdentity), + + /// Your hook or the P2P system was told to shutdown. + Shutdown { + // We can detect when this guard is dropped, it doesn't need to be used. + _guard: ShutdownGuard, + }, +} + +#[derive(Debug)] +pub struct ShutdownGuard(pub(crate) Option>); + +impl ShutdownGuard { + pub(crate) fn new() -> (Self, oneshot::Receiver<()>) { + let (tx, rx) = oneshot::channel(); + (Self(Some(tx)), rx) + } +} + +impl Drop for ShutdownGuard { + fn drop(&mut self) { + if let Some(tx) = self.0.take() { + let _ = tx.send(()); + } + } +} + +impl Clone for ShutdownGuard { + fn clone(&self) -> Self { + Self(None) + } +} + +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub struct HookId(pub(crate) usize); + +#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)] +pub struct ListenerId(pub(crate) usize); + +impl From for HookId { + fn from(value: ListenerId) -> Self { + Self(value.0) + } +} + +#[derive(Debug)] +pub(crate) struct Hook { + /// A name used for debugging purposes. + pub(crate) name: &'static str, + /// A channel to send events to the hook. + /// This hooks implementing will be responsible for subscribing to this channel. + pub(crate) tx: Sender, + /// If this hook is a listener this will be set. + pub(crate) listener: Option, +} + +impl Hook { + pub fn send(&self, event: HookEvent) { + let _ = self.tx.send(event); + } + + pub fn acceptor(&self, id: ListenerId, peer: &Arc, addrs: &HashSet) { + if let Some(listener) = &self.listener { + (listener.acceptor.0)(id, peer, addrs); + } + } +} + +#[derive(Debug)] +pub(crate) struct ListenerData { + /// The address the listener is bound to. + /// These will be advertised by any discovery methods attached to the P2P system. + pub addrs: HashSet, + /// This is a function over a channel because we need to ensure the code runs prior to the peer being emitted to the application. + /// If not the peer would have no registered way to connect to it initially which would be confusing. + #[allow(clippy::type_complexity)] + pub acceptor: + HandlerFn, &HashSet) + Send + Sync>>, +} + +/// A little wrapper for functions to make them `Debug`. +#[derive(Clone)] +pub(crate) struct HandlerFn(pub(crate) F); + +impl fmt::Debug for HandlerFn { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "HandlerFn") + } +} diff --git a/crates/p2p/src/spacetunnel/identity.rs b/crates/p2p2/src/identity.rs similarity index 97% rename from crates/p2p/src/spacetunnel/identity.rs rename to crates/p2p2/src/identity.rs index 400830b14..5308292c0 100644 --- a/crates/p2p/src/spacetunnel/identity.rs +++ b/crates/p2p2/src/identity.rs @@ -1,3 +1,5 @@ +// TODO: Document all types in this file + use std::{ hash::{Hash, Hasher}, str::FromStr, @@ -177,7 +179,7 @@ pub enum IdentityOrRemoteIdentityErr { InvalidFormat, } -/// TODO +/// TODO: Remove this. 
I think it make security issues far too easy. #[derive(Debug, PartialEq)] pub enum IdentityOrRemoteIdentity { Identity(Identity), diff --git a/crates/p2p2/src/lib.rs b/crates/p2p2/src/lib.rs new file mode 100644 index 000000000..4183b6d90 --- /dev/null +++ b/crates/p2p2/src/lib.rs @@ -0,0 +1,24 @@ +//! Rust Peer to Peer Networking Library +#![warn(clippy::all, clippy::unwrap_used, clippy::panic)] + +pub(crate) mod hooks; +mod identity; +mod mdns; +mod p2p; +mod peer; +mod quic; +mod smart_guards; +mod stream; + +pub use hooks::{HookEvent, HookId, ListenerId, ShutdownGuard}; +pub use identity::{ + Identity, IdentityErr, IdentityOrRemoteIdentity, IdentityOrRemoteIdentityErr, RemoteIdentity, +}; +pub use mdns::Mdns; +pub use p2p::{Listener, P2P}; +pub use peer::{ConnectionRequest, Peer}; +pub use quic::{Libp2pPeerId, QuicTransport}; +pub use smart_guards::SmartWriteGuard; +pub use stream::UnicastStream; + +pub use flume; diff --git a/crates/p2p2/src/mdns.rs b/crates/p2p2/src/mdns.rs new file mode 100644 index 000000000..2b18e227b --- /dev/null +++ b/crates/p2p2/src/mdns.rs @@ -0,0 +1,212 @@ +use std::{ + collections::HashMap, net::SocketAddr, pin::Pin, str::FromStr, sync::Arc, time::Duration, +}; + +use flume::{bounded, Receiver}; +use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo}; +use tokio::time::{sleep_until, Instant, Sleep}; +use tracing::{error, trace, warn}; + +use crate::{HookEvent, HookId, RemoteIdentity, ShutdownGuard, P2P}; + +/// The time between re-advertising the mDNS service. +const MDNS_READVERTISEMENT_INTERVAL: Duration = Duration::from_secs(60); // Every minute re-advertise + +/// Multicast DNS (mDNS) is used for discovery of peers over local networks. +#[derive(Debug)] +pub struct Mdns { + p2p: Arc, + hook_id: HookId, +} + +impl Mdns { + pub fn spawn(p2p: Arc) -> Result { + let (tx, rx) = bounded(15); + let hook_id = p2p.register_hook("mdns", tx); + + start(p2p.clone(), hook_id, rx)?; + + Ok(Self { p2p, hook_id }) + } + + pub async fn shutdown(self) { + self.p2p.unregister_hook(self.hook_id).await; + } +} + +struct State { + hook_id: HookId, + p2p: Arc, + service_domain: String, + service_name: String, + mdns_daemon: ServiceDaemon, + next_mdns_advertisement: Pin>, +} + +fn start(p2p: Arc, hook_id: HookId, rx: Receiver) -> Result<(), mdns_sd::Error> { + let service_domain = format!("_{}._udp.local.", p2p.app_name()); + let mut state = State { + hook_id, + service_name: format!("{}.{service_domain}", p2p.remote_identity()), + service_domain, + p2p, + mdns_daemon: ServiceDaemon::new()?, + next_mdns_advertisement: Box::pin(sleep_until( + Instant::now() + MDNS_READVERTISEMENT_INTERVAL, + )), + }; + let mdns_service = state.mdns_daemon.browse(&state.service_domain)?; + + tokio::spawn(async move { + loop { + tokio::select! 
{ + Ok(event) = rx.recv_async() => match event { + HookEvent::MetadataModified | HookEvent::ListenerRegistered(_) | HookEvent::ListenerAddrAdded(_, _) | HookEvent::ListenerAddrRemoved(_, _) | HookEvent::ListenerUnregistered(_) => advertise(&mut state), + HookEvent::Shutdown { _guard } => { + shutdown(_guard, &mut state); + break; + }, + _ => continue, + }, + _ = &mut state.next_mdns_advertisement => advertise(&mut state), + Ok(event) = mdns_service.recv_async() => on_event(&state, event) + }; + } + }); + + Ok(()) +} + +fn advertise(state: &mut State) { + let mut ports_to_service = HashMap::new(); + for addr in state.p2p.listeners().iter().flat_map(|l| l.addrs.clone()) { + ports_to_service + .entry(addr.port()) + .or_insert_with(Vec::new) + .push(addr.ip()); + } + + let meta = state.p2p.metadata().clone(); + for (port, ips) in ports_to_service { + let service = ServiceInfo::new( + &state.service_domain, + &state.p2p.remote_identity().to_string(), + &state.service_name, + &*ips, + port, + // TODO: If a piece of metadata overflows a DNS record take care of splitting it across multiple. + Some(meta.clone()), + ) + .map(|s| s.enable_addr_auto()); + + let service = match service { + Ok(service) => service, + Err(err) => { + warn!("error creating mdns service info: {}", err); + continue; + } + }; + + trace!("advertising mdns service: {:?}", service); + match state.mdns_daemon.register(service) { + Ok(()) => {} + Err(err) => warn!("error registering mdns service: {}", err), + } + } + + state.next_mdns_advertisement = + Box::pin(sleep_until(Instant::now() + MDNS_READVERTISEMENT_INTERVAL)); +} + +fn on_event(state: &State, event: ServiceEvent) { + match event { + ServiceEvent::ServiceResolved(info) => { + let Some(identity) = fullname_to_identity(state, info.get_fullname()) else { + return; + }; + + state.p2p.clone().discover_peer( + state.hook_id, + identity, + info.get_properties() + .iter() + .map(|p| (p.key().to_string(), p.val_str().to_string())) + .collect(), + info.get_addresses() + .iter() + .map(|addr| SocketAddr::new(*addr, info.get_port())) + .collect(), + ); + } + ServiceEvent::ServiceRemoved(_, fullname) => { + let Some(identity) = fullname_to_identity(state, &fullname) else { + return; + }; + + if let Some(peer) = state.p2p.peers().get(&identity) { + peer.undiscover_peer(state.hook_id); + } + } + ServiceEvent::SearchStarted(_) + | ServiceEvent::SearchStopped(_) + | ServiceEvent::ServiceFound(_, _) => {} + } +} + +fn fullname_to_identity( + State { + p2p, + service_domain, + .. + }: &State, + fullname: &str, +) -> Option { + let Some(identity) = fullname + .strip_suffix(service_domain) + .map(|s| &s[0..s.len() - 1]) + else { + warn!( + "resolved peer advertising itself with an invalid fullname '{}'", + fullname + ); + return None; + }; + + let Ok(identity) = RemoteIdentity::from_str(identity) else { + warn!("resolved peer advertising itself with an invalid remote identity '{identity}'"); + return None; + }; + + // Prevent discovery of the current peer. + if identity == p2p.remote_identity() { + return None; + } + + Some(identity) +} + +fn shutdown(_guard: ShutdownGuard, state: &mut State) { + if let Ok(chan) = state + .mdns_daemon + .unregister(&state.service_name) + .map_err(|err| { + error!( + "error removing mdns service '{}': {err}", + state.service_name + ); + }) { + let _ = chan.recv(); + }; + + // TODO: Without this mDNS is not sending it goodbye packets without a timeout. Try and remove this cause it makes shutdown slow. 
+ std::thread::sleep(Duration::from_millis(100)); + + match state.mdns_daemon.shutdown() { + Ok(chan) => { + let _ = chan.recv(); + } + Err(err) => { + error!("error shutting down mdns daemon: {err}"); + } + } +} diff --git a/crates/p2p2/src/p2p.rs b/crates/p2p2/src/p2p.rs new file mode 100644 index 000000000..d84094a89 --- /dev/null +++ b/crates/p2p2/src/p2p.rs @@ -0,0 +1,386 @@ +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + net::SocketAddr, + sync::{Arc, PoisonError, RwLock, RwLockReadGuard}, + time::Duration, +}; + +use flume::Sender; +use hash_map_diff::hash_map_diff; +use libp2p::futures::future::join_all; +use stable_vec::StableVec; +use tokio::{sync::oneshot, time::timeout}; +use tracing::info; + +use crate::{ + hooks::{HandlerFn, Hook, HookEvent, ListenerData, ListenerId, ShutdownGuard}, + smart_guards::SmartWriteGuard, + HookId, Identity, Peer, RemoteIdentity, UnicastStream, +}; + +/// Manager for the entire P2P system. +#[derive(Debug)] +pub struct P2P { + /// A unique identifier for the application. + /// This will differentiate between different applications using this same P2P library. + app_name: &'static str, + /// The identity of the local node. + /// This is the public/private keypair used to uniquely identify the node. + identity: Identity, + /// The channel is used by the application to handle incoming connections. + /// Connection's are automatically closed when dropped so if user forgets to subscribe to this that will just happen as expected. + handler_tx: Sender, + /// Metadata is shared from the local node to the remote nodes. + /// This will contain information such as the node's name, version, and services we provide. + metadata: RwLock>, + /// A list of all peers known to the P2P system. Be aware a peer could be connected and/or discovered at any time. + pub(crate) peers: RwLock>>, + /// Hooks can be registered to react to state changes in the P2P system. + pub(crate) hooks: RwLock>, +} + +impl P2P { + /// Construct a new P2P system. + pub fn new( + app_name: &'static str, + identity: Identity, + handler_tx: Sender, + ) -> Arc { + app_name + .chars() + .all(|c| char::is_alphanumeric(c) || c == '-') + .then_some(()) + .expect("'P2P::new': invalid app_name. Must be alphanumeric or '-' only."); + #[allow(clippy::panic)] + if app_name.len() > 12 { + panic!("'P2P::new': app_name too long. Must be 12 characters or less."); + } + + Arc::new(P2P { + app_name, + identity, + metadata: Default::default(), + peers: Default::default(), + handler_tx, + hooks: Default::default(), + }) + } + + /// The unique identifier for this application. + pub fn app_name(&self) -> &'static str { + self.app_name + } + + /// The identifier of this node that can *MUST* be kept secret. + /// This is a private key in crypto terms. + pub fn identity(&self) -> &Identity { + &self.identity + } + + /// The identifier of this node that can be shared. + /// This is a public key in crypto terms. + pub fn remote_identity(&self) -> RemoteIdentity { + self.identity.to_remote_identity() + } + + /// Metadata is shared from the local node to the remote nodes. + /// This will contain information such as the node's name, version, and services we provide. 
+ pub fn metadata(&self) -> RwLockReadGuard> { + self.metadata.read().unwrap_or_else(PoisonError::into_inner) + } + + pub fn metadata_mut(&self) -> SmartWriteGuard> { + let lock = self + .metadata + .write() + .unwrap_or_else(PoisonError::into_inner); + + SmartWriteGuard::new(self, lock, |p2p, before, after| { + let diff = hash_map_diff(&before, after); + if diff.updated.is_empty() && diff.removed.is_empty() { + return; + } + + p2p.hooks + .read() + .unwrap_or_else(PoisonError::into_inner) + .iter() + .for_each(|(_, hook)| { + hook.send(HookEvent::MetadataModified); + }); + }) + } + + /// A list of all peers known to the P2P system. Be aware a peer could be connected and/or discovered at any time. + pub fn peers(&self) -> RwLockReadGuard>> { + self.peers.read().unwrap_or_else(PoisonError::into_inner) + } + + // TODO: Should this take `addrs`???, A connection through the Relay probs doesn't have one in the same form. + pub fn discover_peer( + self: Arc, + hook_id: HookId, + identity: RemoteIdentity, + metadata: HashMap, + addrs: HashSet, + ) -> Arc { + let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner); + let peer = peers.entry(identity); + let was_peer_inserted = matches!(peer, Entry::Vacant(_)); + let peer = peer + .or_insert_with({ + let p2p = self.clone(); + || Peer::new(identity, p2p) + }) + .clone(); + + { + let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner); + state.discovered.insert(hook_id, addrs.clone()); + } + + peer.metadata_mut().extend(metadata); + + { + let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner); + hooks + .iter() + .for_each(|(id, hook)| hook.acceptor(ListenerId(id), &peer, &addrs)); + + if was_peer_inserted { + hooks + .iter() + .for_each(|(_, hook)| hook.send(HookEvent::PeerAvailable(peer.clone()))); + } + + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerDiscoveredBy(hook_id, peer.clone())) + }); + } + + peer + } + + pub fn connected_to( + self: Arc, + listener: ListenerId, + metadata: HashMap, + stream: UnicastStream, + shutdown_tx: oneshot::Sender<()>, + ) -> Arc { + let identity = stream.remote_identity(); + let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner); + let peer = peers.entry(identity); + let was_peer_inserted = matches!(peer, Entry::Vacant(_)); + let peer = peer + .or_insert_with({ + let p2p = self.clone(); + move || Peer::new(identity, p2p) + }) + .clone(); + + { + let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner); + state.active_connections.insert(listener, shutdown_tx); + } + + peer.metadata_mut().extend(metadata); + + { + let hooks = self.hooks.read().unwrap_or_else(PoisonError::into_inner); + + if was_peer_inserted { + hooks + .iter() + .for_each(|(_, hook)| hook.send(HookEvent::PeerAvailable(peer.clone()))); + } + + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerConnectedWith(listener, peer.clone())) + }); + } + + let _ = self.handler_tx.send(stream); + + peer + } + + /// All active listeners registered with the P2P system. + pub fn listeners(&self) -> Vec { + self.hooks + .read() + .unwrap_or_else(PoisonError::into_inner) + .iter() + .filter_map(|(id, hook)| { + hook.listener.as_ref().map(|listener| Listener { + id: ListenerId(id), + name: hook.name, + addrs: listener.addrs.clone(), + }) + }) + .collect() + } + + /// A listener is a special type of hook which is responsible for accepting incoming connections. 
+ /// + /// It is expected you call `Self::register_listener_addr` after this to register the addresses you are listening on. + /// + /// `acceptor` is called when a peer is discovered, but before it is emitted to the application. + /// This lets you register a connection method if you have one. + pub fn register_listener( + &self, + name: &'static str, + tx: Sender, + acceptor: impl Fn(ListenerId, &Arc, &HashSet) + Send + Sync + 'static, + ) -> ListenerId { + let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner); + let hook_id = hooks.push(Hook { + name, + tx, + listener: Some(ListenerData { + addrs: Default::default(), + acceptor: HandlerFn(Arc::new(acceptor)), + }), + }); + + hooks.iter().for_each(|(id, hook)| { + if id == hook_id { + return; + } + + hook.send(HookEvent::ListenerRegistered(ListenerId(hook_id))); + }); + + ListenerId(hook_id) + } + + pub fn register_listener_addr(&self, listener_id: ListenerId, addr: SocketAddr) { + let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner); + if let Some(listener) = hooks + .get_mut(listener_id.0) + .and_then(|l| l.listener.as_mut()) + { + listener.addrs.insert(addr); + } + + info!("HookEvent::ListenerAddrAdded({listener_id:?}, {addr})"); + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::ListenerAddrAdded(listener_id, addr)); + }); + } + + pub fn unregister_listener_addr(&self, listener_id: ListenerId, addr: SocketAddr) { + let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner); + if let Some(listener) = hooks + .get_mut(listener_id.0) + .and_then(|l| l.listener.as_mut()) + { + listener.addrs.remove(&addr); + } + + info!("HookEvent::ListenerAddrRemoved({listener_id:?}, {addr})"); + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::ListenerAddrRemoved(listener_id, addr)); + }); + } + + // TODO: Probs cleanup return type + pub fn hooks(&self) -> Vec<(HookId, &'static str)> { + self.hooks + .read() + .unwrap_or_else(PoisonError::into_inner) + .iter() + .map(|(id, hook)| (HookId(id), hook.name)) + .collect() + } + + /// Register a new hook which can be used to react to state changes in the P2P system. + pub fn register_hook(&self, name: &'static str, tx: Sender) -> HookId { + HookId( + self.hooks + .write() + .unwrap_or_else(PoisonError::into_inner) + .push(Hook { + name, + tx, + listener: None, + }), + ) + } + + /// Unregister a hook. This will also call `HookEvent::Shutdown` on the hook. 
+ pub async fn unregister_hook(&self, id: HookId) { + let mut shutdown_rxs = Vec::new(); + { + let mut hooks = self.hooks.write().unwrap_or_else(PoisonError::into_inner); + if let Some(hook) = hooks.remove(id.0) { + let (_guard, rx) = ShutdownGuard::new(); + shutdown_rxs.push(rx); + hook.send(HookEvent::Shutdown { _guard }); + + if hook.listener.is_some() { + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::ListenerUnregistered(ListenerId(id.0))); + }); + } + + let mut peers = self.peers.write().unwrap_or_else(PoisonError::into_inner); + let mut peers_to_remove = HashSet::new(); // We are mutate while iterating + for (identity, peer) in peers.iter_mut() { + let mut state = peer.state.write().unwrap_or_else(PoisonError::into_inner); + if let Some(active_connection) = + state.active_connections.remove(&ListenerId(id.0)) + { + let _ = active_connection.send(()); + } + state.connection_methods.remove(&ListenerId(id.0)); + state.discovered.remove(&id); + + if state.connection_methods.is_empty() && state.discovered.is_empty() { + peers_to_remove.insert(*identity); + } + } + + for identity in peers_to_remove { + peers.remove(&identity); + } + } + } + + // We rely on the fact that when the oneshot is dropped this will return an error as opposed to hanging. + // So we can detect when the hooks shutdown code has completed. + let _ = timeout(Duration::from_secs(2), join_all(shutdown_rxs)).await; + } + + /// Shutdown the whole P2P system. + /// This will close all connections and remove all hooks. + pub async fn shutdown(&self) { + let hooks = { + self.hooks + .write() + .unwrap_or_else(PoisonError::into_inner) + .iter() + .map(|i| i.0) + .collect::>() + .clone() + }; + + for hook_id in hooks { + self.unregister_hook(HookId(hook_id)).await; + } + } +} + +#[derive(Debug)] +#[non_exhaustive] +pub struct Listener { + pub id: ListenerId, + pub name: &'static str, + pub addrs: HashSet, +} + +impl Listener { + pub fn is_hook_id(&self, id: HookId) -> bool { + self.id.0 == id.0 + } +} diff --git a/crates/p2p2/src/peer.rs b/crates/p2p2/src/peer.rs new file mode 100644 index 000000000..8b91233e7 --- /dev/null +++ b/crates/p2p2/src/peer.rs @@ -0,0 +1,267 @@ +use std::{ + collections::{HashMap, HashSet}, + net::SocketAddr, + sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak}, +}; + +use thiserror::Error; +use tokio::sync::{mpsc, oneshot}; +use tracing::warn; + +use crate::{HookEvent, HookId, ListenerId, RemoteIdentity, UnicastStream, P2P}; + +#[derive(Debug)] +pub struct Peer { + /// RemoteIdentity of the peer. + pub(crate) identity: RemoteIdentity, + /// Information from `P2P::service` on the remote node. + pub(crate) metadata: RwLock>, + /// We want these states to locked by the same lock so we can ensure they are consistent. + pub(crate) state: RwLock, + /// A reference back to the P2P system. + /// This is weak so we don't have recursive `Arc`'s that can never be dropped. + pub(crate) p2p: Weak, +} + +#[derive(Debug, Default)] +pub(crate) struct State { + /// Active connections with the remote + pub(crate) active_connections: HashMap>, + /// Methods for establishing an active connections with the remote + /// These should be inject by `Listener::acceptor` which is called when a new peer is discovered. + pub(crate) connection_methods: HashMap>, + /// Methods that have discovered this peer. + pub(crate) discovered: HashMap>, +} + +/// A request to connect to a client. +/// This will be handled by a configured listener hook. 
+#[derive(Debug)] +#[non_exhaustive] +pub struct ConnectionRequest { + pub to: RemoteIdentity, + pub addrs: HashSet, + pub tx: oneshot::Sender>, +} + +// TODO: Maybe use this? +// impl State { +// pub(crate) fn needs_removal(&self) -> bool { +// self.discovered.is_empty() +// && self.connection_methods.is_empty() +// && self.active_connections.is_empty() +// } +// } + +impl Eq for Peer {} +impl PartialEq for Peer { + fn eq(&self, other: &Self) -> bool { + self.identity == other.identity + } +} + +// Internal methods +impl Peer { + pub(crate) fn new(identity: RemoteIdentity, p2p: Arc) -> Arc { + Arc::new(Self { + identity, + metadata: Default::default(), + state: Default::default(), + p2p: Arc::downgrade(&p2p), + }) + } +} + +// User-facing methods +impl Peer { + pub fn identity(&self) -> RemoteIdentity { + self.identity + } + + pub fn metadata(&self) -> RwLockReadGuard> { + self.metadata.read().unwrap_or_else(PoisonError::into_inner) + } + + pub fn metadata_mut(&self) -> RwLockWriteGuard> { + self.metadata + .write() + .unwrap_or_else(PoisonError::into_inner) + } + + pub fn can_connect(&self) -> bool { + !self + .state + .read() + .unwrap_or_else(PoisonError::into_inner) + .connection_methods + .is_empty() + } + + pub fn is_connected(&self) -> bool { + !self + .state + .read() + .unwrap_or_else(PoisonError::into_inner) + .active_connections + .is_empty() + } + + pub fn active_connections(&self) -> usize { + self.state + .read() + .unwrap_or_else(PoisonError::into_inner) + .active_connections + .len() + } + + pub fn connection_methods(&self) -> HashSet { + self.state + .read() + .unwrap_or_else(PoisonError::into_inner) + .connection_methods + .keys() + .copied() + .collect() + } + + pub fn discovered_by(&self) -> HashSet { + self.state + .read() + .unwrap_or_else(PoisonError::into_inner) + .discovered + .keys() + .copied() + .collect() + } + + /// Construct a new Quic stream to the peer. + pub async fn new_stream(&self) -> Result { + let (addrs, connect_tx) = { + let state = self.state.read().unwrap_or_else(PoisonError::into_inner); + + let addrs = state + .discovered + .values() + .flatten() + .cloned() + .collect::>(); + + let Some((_id, connect_tx)) = state + .connection_methods + .iter() + .map(|(id, tx)| (*id, tx.clone())) + .next() + else { + return Err(NewStreamError::NoConnectionMethodsAvailable); + }; + + (addrs, connect_tx) + }; + + let (tx, rx) = oneshot::channel(); + connect_tx + .send(ConnectionRequest { + to: self.identity, + addrs, + tx, + }) + .await + .map_err(|err| { + warn!("Failed to send connect request to peer: {}", err); + NewStreamError::EventLoopOffline(err) + })?; + rx.await + .map_err(|err| { + warn!("Failed to receive connect response from peer: {err}"); + NewStreamError::ConnectionNeverEstablished(err) + })? + .map_err(|err| { + warn!("Failed to do the thing: {err}"); + NewStreamError::Connecting(err) + }) + } +} + +// Hook-facing methods +impl Peer { + pub fn hook_discovered(&self, hook: HookId, addrs: HashSet) { + // TODO: Emit event maybe??? 
+ + self.state + .write() + .unwrap_or_else(PoisonError::into_inner) + .discovered + .insert(hook, addrs); + } + + pub fn listener_available(&self, listener: ListenerId, tx: mpsc::Sender) { + self.state + .write() + .unwrap_or_else(PoisonError::into_inner) + .connection_methods + .insert(listener, tx); + } + + pub fn undiscover_peer(&self, hook_id: HookId) { + let Some(p2p) = self.p2p.upgrade() else { + return; + }; + + let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner); + state.discovered.remove(&hook_id); + + let hooks = p2p.hooks.read().unwrap_or_else(PoisonError::into_inner); + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerExpiredBy(hook_id, self.identity)); + }); + + if state.connection_methods.is_empty() && state.discovered.is_empty() { + p2p.peers + .write() + .unwrap_or_else(PoisonError::into_inner) + .remove(&self.identity); + + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerUnavailable(self.identity)); + }); + } + } + + pub fn disconnected_from(&self, listener_id: ListenerId) { + let Some(p2p) = self.p2p.upgrade() else { + return; + }; + + let mut state = self.state.write().unwrap_or_else(PoisonError::into_inner); + state.connection_methods.remove(&listener_id); + state.active_connections.remove(&listener_id); + + let hooks = p2p.hooks.read().unwrap_or_else(PoisonError::into_inner); + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerDisconnectedWith(listener_id, self.identity)); + }); + + if state.connection_methods.is_empty() && state.discovered.is_empty() { + p2p.peers + .write() + .unwrap_or_else(PoisonError::into_inner) + .remove(&self.identity); + + hooks.iter().for_each(|(_, hook)| { + hook.send(HookEvent::PeerUnavailable(self.identity)); + }); + } + } +} + +#[derive(Debug, Error)] +pub enum NewStreamError { + #[error("No connection methods available for peer")] + NoConnectionMethodsAvailable, + #[error("The event loop is offline")] + EventLoopOffline(mpsc::error::SendError), + #[error("Failed to establish the connection w/ error: {0}")] + ConnectionNeverEstablished(oneshot::error::RecvError), + #[error("error connecting to peer: {0}")] + Connecting(String), +} diff --git a/crates/p2p2/src/quic/mod.rs b/crates/p2p2/src/quic/mod.rs new file mode 100644 index 000000000..e2447a072 --- /dev/null +++ b/crates/p2p2/src/quic/mod.rs @@ -0,0 +1,4 @@ +pub(super) mod transport; +pub(super) mod utils; + +pub use transport::{Libp2pPeerId, QuicTransport}; diff --git a/crates/p2p2/src/quic/transport.rs b/crates/p2p2/src/quic/transport.rs new file mode 100644 index 000000000..c2dd3a601 --- /dev/null +++ b/crates/p2p2/src/quic/transport.rs @@ -0,0 +1,372 @@ +use std::{ + collections::{HashMap, HashSet}, + convert::Infallible, + net::{Ipv4Addr, Ipv6Addr, SocketAddr}, + sync::{Arc, PoisonError, RwLock}, + time::Duration, +}; + +use flume::{bounded, Receiver, Sender}; +use libp2p::{ + core::muxing::StreamMuxerBox, + futures::{AsyncReadExt, AsyncWriteExt, StreamExt}, + swarm::SwarmEvent, + StreamProtocol, Swarm, SwarmBuilder, Transport, +}; +use libp2p_stream::Behaviour; +use tokio::{ + net::TcpListener, + sync::{mpsc, oneshot}, + time::timeout, +}; +use tokio_util::compat::FuturesAsyncReadCompatExt; +use tracing::{debug, warn}; + +use crate::{ + identity::REMOTE_IDENTITY_LEN, + quic::utils::{ + identity_to_libp2p_keypair, remote_identity_to_libp2p_peerid, socketaddr_to_quic_multiaddr, + }, + ConnectionRequest, HookEvent, ListenerId, RemoteIdentity, UnicastStream, P2P, +}; + +const PROTOCOL: StreamProtocol = 
StreamProtocol::new("/sdp2p/1"); + +/// [libp2p::PeerId] for debugging purposes only. +#[derive(Debug)] +pub struct Libp2pPeerId(libp2p::PeerId); + +#[derive(Debug)] +enum InternalEvent { + RegisterListener { + id: ListenerId, + ipv4: bool, + addr: SocketAddr, + result: oneshot::Sender>, + }, + UnregisterListener { + id: ListenerId, + ipv4: bool, + result: oneshot::Sender>, + }, +} + +/// Transport using Quic to establish a connection between peers. +/// This uses `libp2p` internally. +#[derive(Debug)] +pub struct QuicTransport { + id: ListenerId, + p2p: Arc, + internal_tx: Sender, +} + +impl QuicTransport { + /// Spawn the `QuicTransport` and register it with the P2P system. + /// Be aware spawning this does nothing unless you call `Self::set_ipv4_enabled`/`Self::set_ipv6_enabled` to enable the listeners. + // TODO: Error type here + pub fn spawn(p2p: Arc) -> Result<(Self, Libp2pPeerId), String> { + let keypair = identity_to_libp2p_keypair(p2p.identity()); + let libp2p_peer_id = Libp2pPeerId(keypair.public().to_peer_id()); + + let (tx, rx) = bounded(15); + let (internal_tx, internal_rx) = bounded(15); + let (connect_tx, connect_rx) = mpsc::channel(15); + let id = p2p.register_listener("libp2p-quic", tx, move |listener_id, peer, _addrs| { + // TODO: I don't love this always being registered. Really it should only show up if the other device is online (do a ping-type thing)??? + peer.listener_available(listener_id, connect_tx.clone()); + }); + + let swarm = ok(ok(SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_other_transport(|keypair| { + libp2p_quic::GenTransport::::new( + libp2p_quic::Config::new(keypair), + ) + .map(|(p, c), _| (p, StreamMuxerBox::new(c))) + .boxed() + })) + .with_behaviour(|_| Behaviour::new())) + .with_swarm_config(|c| c.with_idle_connection_timeout(Duration::from_secs(60))) + .build(); + + tokio::spawn(start(p2p.clone(), id, swarm, rx, internal_rx, connect_rx)); + + Ok(( + Self { + id, + p2p, + internal_tx, + }, + libp2p_peer_id, + )) + } + + // `None` on the port means disabled. Use `0` for random port. 
+ pub async fn set_ipv4_enabled(&self, port: Option) -> Result<(), String> { + self.setup_listener( + port.map(|p| SocketAddr::from((Ipv4Addr::UNSPECIFIED, p))), + true, + ) + .await + } + + pub async fn set_ipv6_enabled(&self, port: Option) -> Result<(), String> { + self.setup_listener( + port.map(|p| SocketAddr::from((Ipv6Addr::UNSPECIFIED, p))), + false, + ) + .await + } + + // TODO: Proper error type + async fn setup_listener(&self, addr: Option, ipv4: bool) -> Result<(), String> { + let (tx, rx) = oneshot::channel(); + let event = if let Some(mut addr) = addr { + if addr.port() == 0 { + #[allow(clippy::unwrap_used)] // TODO: Error handling + addr.set_port( + TcpListener::bind(addr) + .await + .unwrap() + .local_addr() + .unwrap() + .port(), + ); + } + + InternalEvent::RegisterListener { + id: self.id, + ipv4, + addr, + result: tx, + } + } else { + InternalEvent::UnregisterListener { + id: self.id, + ipv4, + result: tx, + } + }; + + let Ok(_) = self.internal_tx.send(event) else { + return Err("internal channel closed".to_string()); + }; + rx.await + .map_err(|_| "internal response channel closed".to_string()) + .and_then(|r| r) + } + + pub async fn shutdown(self) { + self.p2p.unregister_hook(self.id.into()).await; + } +} + +fn ok(v: Result) -> T { + match v { + Ok(v) => v, + Err(_) => unreachable!(), + } +} + +async fn start( + p2p: Arc, + id: ListenerId, + mut swarm: Swarm, + rx: Receiver, + internal_rx: Receiver, + mut connect_rx: mpsc::Receiver, +) { + let mut ipv4_listener = None; + let mut ipv6_listener = None; + + let mut control = swarm.behaviour().new_control(); + #[allow(clippy::unwrap_used)] // TODO: Error handling + let mut incoming = control.accept(PROTOCOL).unwrap(); + let map = Arc::new(RwLock::new(HashMap::new())); + + loop { + tokio::select! { + Ok(event) = rx.recv_async() => match event { + HookEvent::PeerExpiredBy(_, identity) => { + println!("CHECKING {:?}", identity); // TODO + + let Some(peer) = p2p.peers.read().unwrap_or_else(PoisonError::into_inner).get(&identity).map(Clone::clone) else { + continue; + }; + + let addrs = { + let state = peer.state.read().unwrap_or_else(PoisonError::into_inner); + + state + .discovered + .values() + .flatten() + .cloned() + .collect::>() + }; + + let peer_id = remote_identity_to_libp2p_peerid(&identity); + + let mut control = control.clone(); + tokio::spawn(async move { + match timeout(Duration::from_secs(5), control.open_stream_with_addrs( + peer_id, + PROTOCOL, + addrs.iter() + .map(socketaddr_to_quic_multiaddr) + .collect() + )).await { + Ok(Ok(_)) => {} + Err(_) | Ok(Err(_)) => peer.disconnected_from(id), + }; + }); + }, + HookEvent::Shutdown { _guard } => { + let connected_peers = swarm.connected_peers().cloned().collect::>(); + for peer_id in connected_peers { + let _ = swarm.disconnect_peer_id(peer_id); + } + + if let Some((id, _)) = ipv4_listener.take() { + let _ = swarm.remove_listener(id); + } + if let Some((id, _)) = ipv6_listener.take() { + let _ = swarm.remove_listener(id); + } + + // TODO: We don't break the event loop so libp2p can be polled to keep cleaning up. 
+ // break; + }, + _ => {}, + }, + Some((peer_id, mut stream)) = incoming.next() => { + let p2p = p2p.clone(); + let map = map.clone(); + tokio::spawn(async move { + let mut actual = [0; REMOTE_IDENTITY_LEN]; + match stream.read_exact(&mut actual).await { + Ok(_) => {}, + Err(e) => { + warn!("Failed to read remote identity with libp2p::PeerId({peer_id:?}): {e:?}"); + return; + }, + } + let identity = match RemoteIdentity::from_bytes(&actual) { + Ok(i) => i, + Err(e) => { + warn!("Failed to parse remote identity with libp2p::PeerId({peer_id:?}): {e:?}"); + return; + }, + }; + + // We need to go `PeerId -> RemoteIdentity` but as `PeerId` is a hash that's impossible. + // So to make this work the connection initiator will send their remote identity. + // It is however untrusted as they could send anything, so we convert it to a PeerId and check it matches the PeerId for this connection. + // If it matches, we are certain they own the private key as libp2p takes care of ensuring the PeerId is trusted. + let remote_identity_peer_id = remote_identity_to_libp2p_peerid(&identity); + if peer_id != remote_identity_peer_id { + warn!("Derived remote identity '{remote_identity_peer_id:?}' does not match libp2p::PeerId({peer_id:?})"); + return; + } + map.write().unwrap_or_else(PoisonError::into_inner).insert(peer_id, identity); + + // TODO: Sync metadata + let metadata = HashMap::new(); + + let stream = UnicastStream::new(identity, stream.compat()); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + p2p.connected_to( + id, + metadata, + stream, + shutdown_tx, + ); + + debug!("established inbound stream with '{}'", identity); + + let _todo = shutdown_rx; // TODO: Handle `shutdown_rx` + }); + }, + event = swarm.select_next_some() => if let SwarmEvent::ConnectionClosed { peer_id, num_established: 0, .. 
} = event { + let Some(identity) = map.write().unwrap_or_else(PoisonError::into_inner).remove(&peer_id) else { + warn!("Tried to remove a peer that wasn't in the map."); + continue; + }; + + let peers = p2p.peers.read().unwrap_or_else(PoisonError::into_inner); + let Some(peer) = peers.get(&identity) else { + warn!("Tried to remove a peer that wasn't in the P2P system."); + continue; + }; + + peer.disconnected_from(id); + }, + Ok(event) = internal_rx.recv_async() => match event { + InternalEvent::RegisterListener { id, ipv4, addr, result } => { + match swarm.listen_on(socketaddr_to_quic_multiaddr(&addr)) { + Ok(libp2p_listener_id) => { + let this = match ipv4 { + true => &mut ipv4_listener, + false => &mut ipv6_listener, + }; + // TODO: Diff the `addr` & if it's changed actually update it + if this.is_none() { + *this = Some((libp2p_listener_id, addr)); + p2p.register_listener_addr(id, addr); + } + + let _ = result.send(Ok(())); + }, + Err(e) => { + let _ = result.send(Err(e.to_string())); + }, + } + }, + InternalEvent::UnregisterListener { id, ipv4, result } => { + let this = match ipv4 { + true => &mut ipv4_listener, + false => &mut ipv6_listener, + }; + if let Some((addr_id, addr)) = this.take() { + if swarm.remove_listener(addr_id) { + p2p.unregister_listener_addr(id, addr); + } + } + let _ = result.send(Ok(())); + }, + }, + Some(req) = connect_rx.recv() => { + let mut control = control.clone(); + let self_remote_identity = p2p.identity().to_remote_identity(); + let map = map.clone(); + tokio::spawn(async move { + let peer_id = remote_identity_to_libp2p_peerid(&req.to); + match control.open_stream_with_addrs( + peer_id, + PROTOCOL, + req.addrs.iter() + .map(socketaddr_to_quic_multiaddr) + .collect() + ).await { + Ok(mut stream) => { + map.write().unwrap_or_else(PoisonError::into_inner).insert(peer_id, req.to); + + match stream.write_all(&self_remote_identity.get_bytes()).await { + Ok(_) => { + debug!("Established outbound stream with '{}'", req.to); + let _ = req.tx.send(Ok(UnicastStream::new(req.to, stream.compat()))); + }, + Err(e) => { + let _ = req.tx.send(Err(e.to_string())); + }, + } + }, + Err(e) => { + let _ = req.tx.send(Err(e.to_string())); + }, + } + }); + } + } + } +} diff --git a/crates/p2p2/src/quic/utils.rs b/crates/p2p2/src/quic/utils.rs new file mode 100644 index 000000000..2507e36a1 --- /dev/null +++ b/crates/p2p2/src/quic/utils.rs @@ -0,0 +1,38 @@ +//! This file contains some fairly meaningless glue code for integrating with libp2p. + +use std::net::SocketAddr; + +use libp2p::{identity::Keypair, multiaddr::Protocol, Multiaddr, PeerId}; + +use crate::{Identity, RemoteIdentity}; + +#[must_use] +pub(crate) fn socketaddr_to_quic_multiaddr(m: &SocketAddr) -> Multiaddr { + let mut addr = Multiaddr::empty(); + match m { + SocketAddr::V4(ip) => addr.push(Protocol::Ip4(*ip.ip())), + SocketAddr::V6(ip) => addr.push(Protocol::Ip6(*ip.ip())), + } + addr.push(Protocol::Udp(m.port())); + addr.push(Protocol::QuicV1); + addr +} + +// This is sketchy, but it makes the whole system a lot easier to work with +// We are assuming the libp2p `PublicKey` is the same format as our `RemoteIdentity` type. +// This is *acktually* true but they reserve the right to change it at any point. 
+#[must_use] +pub fn remote_identity_to_libp2p_peerid(identity: &RemoteIdentity) -> PeerId { + let public_key = libp2p::identity::ed25519::PublicKey::try_from_bytes(&identity.get_bytes()) + .expect("should be the same format"); + PeerId::from_public_key(&public_key.into()) +} + +// This is sketchy, but it makes the whole system a lot easier to work with +// We are assuming the libp2p `Keypair` is the same format as our `Identity` type. +// This is *acktually* true but they reserve the right to change it at any point. +#[must_use] +pub fn identity_to_libp2p_keypair(identity: &Identity) -> Keypair { + libp2p::identity::Keypair::ed25519_from_bytes(identity.to_bytes()) + .expect("should be the same format") +} diff --git a/crates/p2p2/src/smart_guards.rs b/crates/p2p2/src/smart_guards.rs new file mode 100644 index 000000000..6177ed930 --- /dev/null +++ b/crates/p2p2/src/smart_guards.rs @@ -0,0 +1,55 @@ +use std::{ + ops::{Deref, DerefMut}, + sync::RwLockWriteGuard, +}; + +use crate::P2P; + +type SaveFn = fn(&P2P, /* before */ T, /* after */ &T); + +/// A special guard for `RwLockWriteGuard` that will call a `save` function when it's dropped. +/// This allows changes to the value to automatically trigger `HookEvents` to be emitted. +#[derive(Debug)] +pub struct SmartWriteGuard<'a, T> { + p2p: &'a P2P, + lock: RwLockWriteGuard<'a, T>, + before: Option, + save: SaveFn, +} + +impl<'a, T: Clone> SmartWriteGuard<'a, T> { + pub(crate) fn new(p2p: &'a P2P, lock: RwLockWriteGuard<'a, T>, save: SaveFn) -> Self { + Self { + p2p, + before: Some(lock.clone()), + lock, + save, + } + } +} + +impl<'a, T> Deref for SmartWriteGuard<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.lock + } +} + +impl<'a, T> DerefMut for SmartWriteGuard<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.lock + } +} + +impl<'a, T> Drop for SmartWriteGuard<'a, T> { + fn drop(&mut self) { + (self.save)( + self.p2p, + self.before + .take() + .expect("'SmartWriteGuard::drop' called more than once!"), + &self.lock, + ); + } +} diff --git a/crates/p2p2/src/stream.rs b/crates/p2p2/src/stream.rs new file mode 100644 index 000000000..6fd106f5e --- /dev/null +++ b/crates/p2p2/src/stream.rs @@ -0,0 +1,81 @@ +use std::{ + fmt, io, + pin::Pin, + task::{Context, Poll}, +}; + +use sync_wrapper::SyncWrapper; +use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf}; + +use crate::RemoteIdentity; + +trait IoStream: AsyncRead + AsyncWrite {} +impl IoStream for S {} + +/// A unicast stream is a direct stream to a specific peer. 
+pub struct UnicastStream { + io: SyncWrapper>>, + remote: RemoteIdentity, +} + +impl fmt::Debug for UnicastStream { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("UnicastStream") + .field("remote", &self.remote) + .finish() + } +} + +impl UnicastStream { + pub fn new(remote: RemoteIdentity, io: S) -> Self { + Self { + io: SyncWrapper::new(Box::pin(io)), + remote, + } + } + + #[must_use] + pub fn remote_identity(&self) -> RemoteIdentity { + self.remote + } + + pub async fn close(self) -> Result<(), io::Error> { + self.io.into_inner().shutdown().await + } +} + +impl AsyncRead for UnicastStream { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf<'_>, + ) -> Poll> { + Pin::new(&mut self.get_mut().io) + .get_pin_mut() + .poll_read(cx, buf) + } +} + +impl AsyncWrite for UnicastStream { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut self.get_mut().io) + .get_pin_mut() + .poll_write(cx, buf) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().io) + .get_pin_mut() + .poll_flush(cx) + } + + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.get_mut().io) + .get_pin_mut() + .poll_shutdown(cx) + } +} diff --git a/interface/app/$libraryId/debug/p2p.tsx b/interface/app/$libraryId/debug/p2p.tsx index ccd8a7ff0..18e2d875c 100644 --- a/interface/app/$libraryId/debug/p2p.tsx +++ b/interface/app/$libraryId/debug/p2p.tsx @@ -1,15 +1,24 @@ -import { useBridgeQuery, useCache, useConnectedPeers, useNodes } from '@sd/client'; +import { + useBridgeMutation, + useBridgeQuery, + useCache, + useConnectedPeers, + useDiscoveredPeers, + useNodes +} from '@sd/client'; +import { Button, toast } from '@sd/ui'; export const Component = () => { const node = useBridgeQuery(['nodeState']); return (
- {node.data?.p2p_enabled === false ? ( + {/* {node.data?.p2p_enabled === false ? (

P2P is disabled. Please enable it in settings!

) : ( - )} + )} */} +
); }; @@ -20,19 +29,46 @@ function Page() { }); const result = useBridgeQuery(['library.list']); const connectedPeers = useConnectedPeers(); + const discoveredPeers = useDiscoveredPeers(); useNodes(result.data?.nodes); const libraries = useCache(result.data?.items); + const debugConnect = useBridgeMutation(['p2p.debugConnect'], { + onSuccess: () => { + toast.success('Connected!'); + }, + onError: (e) => { + toast.error(`Error connecting '${e.message}'`); + } + }); return (
-
-

Connected to:

- {connectedPeers.size === 0 &&

None

} - {[...connectedPeers.entries()].map(([id, node]) => ( -
-

{id}

-
- ))} +
+
+

Discovered:

+ {discoveredPeers.size === 0 &&

None

} + {[...discoveredPeers.entries()].map(([id, _node]) => ( +
+

{id}

+ +
+ ))} +
+
+

Connected to:

+ {connectedPeers.size === 0 &&

None

} + {[...connectedPeers.entries()].map(([id, node]) => ( +
+

{id}

+
+ ))} +
diff --git a/interface/app/$libraryId/settings/client/general.tsx b/interface/app/$libraryId/settings/client/general.tsx index d81e336ed..d42b38986 100644 --- a/interface/app/$libraryId/settings/client/general.tsx +++ b/interface/app/$libraryId/settings/client/general.tsx @@ -53,9 +53,9 @@ export const Component = () => { schema: z .object({ name: z.string().min(1).max(250).optional(), - p2p_enabled: z.boolean().optional(), - p2p_port: u16, - customOrDefault: z.enum(['Custom', 'Default']), + // p2p_enabled: z.boolean().optional(), + // p2p_port: u16, + // customOrDefault: z.enum(['Custom', 'Default']), image_labeler_version: z.string().optional(), background_processing_percentage: z.coerce .number({ @@ -69,25 +69,28 @@ export const Component = () => { reValidateMode: 'onChange', defaultValues: { name: node.data?.name, - p2p_port: node.data?.p2p_port || 0, - p2p_enabled: node.data?.p2p_enabled, - customOrDefault: node.data?.p2p_port ? 'Custom' : 'Default', + // p2p_port: node.data?.p2p_port || 0, + // p2p_enabled: node.data?.p2p_enabled, + // customOrDefault: node.data?.p2p_port ? 'Custom' : 'Default', image_labeler_version: node.data?.image_labeler_version ?? undefined, background_processing_percentage: node.data?.preferences.thumbnailer.background_processing_percentage || 50 } }); - const watchCustomOrDefault = form.watch('customOrDefault'); - const watchP2pEnabled = form.watch('p2p_enabled'); + // const watchCustomOrDefault = form.watch('customOrDefault'); + // const watchP2pEnabled = form.watch('p2p_enabled'); const watchBackgroundProcessingPercentage = form.watch('background_processing_percentage'); useDebouncedFormWatch(form, async (value) => { if (await form.trigger()) { await editNode.mutateAsync({ name: value.name || null, - p2p_port: value.customOrDefault === 'Default' ? 0 : Number(value.p2p_port), - p2p_enabled: value.p2p_enabled ?? null, + p2p_ipv4_port: null, + p2p_ipv6_port: null, + p2p_discovery: null, + // p2p_port: value.customOrDefault === 'Default' ? 0 : Number(value.p2p_port), + // p2p_enabled: value.p2p_enabled ?? null, image_labeler_version: value.image_labeler_version ?? null }); @@ -101,11 +104,11 @@ export const Component = () => { node.refetch(); }); - form.watch((data) => { - if (Number(data.p2p_port) > 65535) { - form.setValue('p2p_port', 65535); - } - }); + // form.watch((data) => { + // if (Number(data.p2p_port) > 65535) { + // form.setValue('p2p_port', 65535); + // } + // }); const { t } = useLocale(); @@ -124,13 +127,13 @@ export const Component = () => { {connectedPeers.size} {t('peers')} - {node.data?.p2p_enabled === true ? ( + {/* {node.data?.p2p_enabled === true ? ( {t('running')} ) : ( {t('disabled')} - )} + )} */}
@@ -321,11 +324,12 @@ export const Component = () => {
 					{/* TODO: Switch doesn't handle optional fields correctly */}
-						form.setValue('p2p_enabled', !form.getValues('p2p_enabled'))}
+						// checked={watchP2pEnabled || false}
+						// onClick={() => form.setValue('p2p_enabled', !form.getValues('p2p_enabled'))}
+						disabled
 					/>
-					{ }} />
-
+				*/}
 	);
diff --git a/interface/app/p2p/index.tsx b/interface/app/p2p/index.tsx
index aa7189e59..f59a492cf 100644
--- a/interface/app/p2p/index.tsx
+++ b/interface/app/p2p/index.tsx
@@ -3,47 +3,47 @@ import { useBridgeQuery, useFeatureFlag, useP2PEvents, withFeatureFlag } from '@
 import { toast } from '@sd/ui';
 export function useP2PErrorToast() {
-	const nodeState = useBridgeQuery(['nodeState']);
-	const [didShowError, setDidShowError] = useState({
-		ipv4: false,
-		ipv6: false
-	});
+	// const nodeState = useBridgeQuery(['nodeState']);
+	// const [didShowError, setDidShowError] = useState({
+	// 	ipv4: false,
+	// 	ipv6: false
+	// });
-	// TODO: This can probally be improved in the future. Theorically if you enable -> disable -> then enable and it fails both enables the error won't be shown.
-	useEffect(() => {
-		const ipv4Error =
-			(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv4.status === 'Error') || false;
-		const ipv6Error =
-			(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv6.status === 'Error') || false;
+	// // TODO: This can probally be improved in the future. Theorically if you enable -> disable -> then enable and it fails both enables the error won't be shown.
+	// useEffect(() => {
+	// 	const ipv4Error =
+	// 		(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv4.status === 'Error') || false;
+	// 	const ipv6Error =
+	// 		(nodeState.data?.p2p_enabled && nodeState.data?.p2p.ipv6.status === 'Error') || false;
-		if (!didShowError.ipv4 && ipv4Error)
-			toast.error(
-				{
-					title: 'Error starting up P2P!',
-					body: 'Error creating the IPv4 listener. Please check your firewall settings!'
-				},
-				{
-					id: 'ipv4-listener-error'
-				}
-			);
+	// 	if (!didShowError.ipv4 && ipv4Error)
+	// 		toast.error(
+	// 			{
+	// 				title: 'Error starting up P2P!',
+	// 				body: 'Error creating the IPv4 listener. Please check your firewall settings!'
+	// 			},
+	// 			{
+	// 				id: 'ipv4-listener-error'
+	// 			}
+	// 		);
-		if (!didShowError.ipv6 && ipv6Error)
-			toast.error(
-				{
-					title: 'Error starting up P2P!',
-					body: 'Error creating the IPv6 listener. Please check your firewall settings!'
-				},
-				{
-					id: 'ipv6-listener-error'
-				}
-			);
+	// 	if (!didShowError.ipv6 && ipv6Error)
+	// 		toast.error(
+	// 			{
+	// 				title: 'Error starting up P2P!',
+	// 				body: 'Error creating the IPv6 listener. Please check your firewall settings!'
+	// 			},
+	// 			{
+	// 				id: 'ipv6-listener-error'
+	// 			}
+	// 		);
-		setDidShowError({
-			ipv4: ipv4Error,
-			ipv6: ipv6Error
-		});
-		// eslint-disable-next-line react-hooks/exhaustive-deps
-	}, [nodeState.data]);
+	// 	setDidShowError({
+	// 		ipv4: ipv4Error,
+	// 		ipv6: ipv6Error
+	// 	});
+	// 	// eslint-disable-next-line react-hooks/exhaustive-deps
+	// }, [nodeState.data]);
 	return null;
 }
diff --git a/packages/client/src/core.ts b/packages/client/src/core.ts
index 5d6e4b5b7..a676394a0 100644
--- a/packages/client/src/core.ts
+++ b/packages/client/src/core.ts
@@ -112,6 +112,7 @@ export type Procedures = {
 	{ key: "nodes.updateThumbnailerPreferences", input: UpdateThumbnailerPreferences, result: null } |
 	{ key: "p2p.acceptSpacedrop", input: [string, string | null], result: null } |
 	{ key: "p2p.cancelSpacedrop", input: string, result: null } |
+	{ key: "p2p.debugConnect", input: RemoteIdentity, result: string } |
 	{ key: "p2p.spacedrop", input: SpacedropArgs, result: string } |
 	{ key: "preferences.update", input: LibraryArgs, result: null } |
 	{ key: "search.saved.create", input: LibraryArgs<{ name: string; search?: string | null; filters?: string | null; description?: string | null; icon?: string | null }>, result: null } |
@@ -160,7 +161,7 @@ export type CacheNode = { __type: string; __id: string; "#node": any }

 export type CameraData = { device_make: string | null; device_model: string | null; color_space: string | null; color_profile: ColorProfile | null; focal_length: number | null; shutter_speed: number | null; flash: Flash | null; orientation: Orientation; lens_make: string | null; lens_model: string | null; bit_depth: number | null; red_eye: boolean | null; zoom: number | null; iso: number | null; software: string | null; serial_number: string | null; lens_serial_number: string | null; contrast: number | null; saturation: number | null; sharpness: number | null; composite: Composite | null }

-export type ChangeNodeNameArgs = { name: string | null; p2p_port: MaybeUndefined; p2p_enabled: boolean | null; image_labeler_version: string | null }
+export type ChangeNodeNameArgs = { name: string | null; p2p_ipv4_port: Port | null; p2p_ipv6_port: Port | null; p2p_discovery: P2PDiscoveryState | null; image_labeler_version: string | null }

 export type CloudInstance = { id: string; uuid: string; identity: RemoteIdentity; nodeId: string; nodeName: string; nodePlatform: number }

@@ -247,7 +248,7 @@ export type FileDeleterJobInit = { location_id: number; file_path_ids: number[]

 export type FileEraserJobInit = { location_id: number; file_path_ids: number[]; passes: string }

-export type FilePath = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null }
+export type FilePath = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; key_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null }

 export type FilePathCursor = { isDir: boolean; variant: FilePathCursorVariant }

@@ -261,7 +262,7 @@ export type FilePathOrder = { field: "name"; value: SortOrder } | { field: "size

 export type FilePathSearchArgs = { take?: number | null; orderAndPagination?: OrderAndPagination | null; filters?: SearchFilterArgs[]; groupDirectories?: boolean }

-export type FilePathWithObject = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null; object: { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } | null }
+export type FilePathWithObject = { id: number; pub_id: number[]; is_dir: boolean | null; cas_id: string | null; integrity_checksum: string | null; location_id: number | null; materialized_path: string | null; name: string | null; extension: string | null; hidden: boolean | null; size_in_bytes: string | null; size_in_bytes_bytes: number[] | null; inode: number[] | null; object_id: number | null; key_id: number | null; date_created: string | null; date_modified: string | null; date_indexed: string | null; object: { id: number; pub_id: number[]; kind: number | null; key_id: number | null; hidden: boolean | null; favorite: boolean | null; important: boolean | null; note: string | null; date_created: string | null; date_accessed: string | null } | null }

 export type Flash = {
 	/**
@@ -397,7 +398,7 @@ export type LibraryPreferences = { location?: { [key in string]: LocationSetting

 export type LightScanArgs = { location_id: number; sub_path: string }

-export type ListenerStatus = { status: "Disabled" } | { status: "Enabling" } | { status: "Listening"; port: number } | { status: "Error"; error: string }
+export type Listener2 = { id: string; name: string; addrs: string[] }

 export type Location = { id: number; pub_id: number[]; name: string | null; path: string | null; total_capacity: number | null; available_capacity: number | null; size_in_bytes: number[] | null; is_archived: boolean | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; date_created: string | null; instance_id: number | null }

@@ -446,7 +447,7 @@ id: string;
 /**
  * name is the display name of the current node. This is set by the user and is shown in the UI.
 // TODO: Length validation so it can fit in DNS record
 */
-name: string; p2p_enabled: boolean; p2p_port: number | null; features: BackendFeature[]; preferences: NodePreferences; image_labeler_version: string | null }) & { data_path: string; p2p: P2PStatus; device_model: string | null }
+name: string; identity: RemoteIdentity; p2p_ipv4_port: Port; p2p_ipv6_port: Port; p2p_discovery: P2PDiscoveryState; features: BackendFeature[]; preferences: NodePreferences; image_labeler_version: string | null }) & { data_path: string; listeners: Listener2[]; device_model: string | null }

 export type NonIndexedPathItem = { path: string; name: string; extension: string; kind: number; is_dir: boolean; date_created: string; date_modified: string; size_in_bytes_bytes: number[]; hidden: boolean }

@@ -507,17 +508,19 @@ export type OrderAndPagination = { orderOnly: TOrder } | {

 export type Orientation = "Normal" | "CW90" | "CW180" | "CW270" | "MirroredVertical" | "MirroredHorizontal" | "MirroredHorizontalAnd90CW" | "MirroredHorizontalAnd270CW"

+export type P2PDiscoveryState = "Everyone" | "ContactsOnly" | "Disabled"
+
 /**
  * TODO: P2P event for the frontend
  */
 export type P2PEvent = { type: "DiscoveredPeer"; identity: RemoteIdentity; metadata: PeerMetadata } | { type: "ExpiredPeer"; identity: RemoteIdentity } | { type: "ConnectedPeer"; identity: RemoteIdentity } | { type: "DisconnectedPeer"; identity: RemoteIdentity } | { type: "SpacedropRequest"; id: string; identity: RemoteIdentity; peer_name: string; files: string[] } | { type: "SpacedropProgress"; id: string; percent: number } | { type: "SpacedropTimedout"; id: string } | { type: "SpacedropRejected"; id: string }

-export type P2PStatus = { ipv4: ListenerStatus; ipv6: ListenerStatus }
-
 export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; device_model: HardwareModel | null; version: string | null }

 export type PlusCode = string

+export type Port = null | number
+
 export type Range = { from: T } | { to: T }

 /**
diff --git a/packages/client/src/hooks/useP2PEvents.tsx b/packages/client/src/hooks/useP2PEvents.tsx
index e90816d32..6987e4644 100644
--- a/packages/client/src/hooks/useP2PEvents.tsx
+++ b/packages/client/src/hooks/useP2PEvents.tsx
@@ -63,6 +63,10 @@ export function P2PContextProvider({ children }: PropsWithChildren) {
 	);
 }

+export function useP2PContextRaw() {
+	return useContext(Context);
+}
+
 export function useDiscoveredPeers() {
 	return useContext(Context).discoveredPeers;
 }
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index d310ebb1a..4f35fd35c 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -4731,10 +4731,6 @@ packages:
     peerDependencies:
       '@effect-ts/otel-node': '*'
     peerDependenciesMeta:
-      '@effect-ts/core':
-        optional: true
-      '@effect-ts/otel':
-        optional: true
       '@effect-ts/otel-node':
         optional: true
     dependencies: