mirror of
https://github.com/spacedriveapp/spacedrive
synced 2024-07-04 11:03:27 +00:00
rspc 0.1.3 prep (#756)
* Upgrade rspc + Specta + PCR * fix jsonrpc executor * fix invalidate excessive logging * fix `Protected` type * fix mobile * clippy * lol
This commit is contained in:
parent
86f387ad30
commit
85d0ddf6d3
434
Cargo.lock
generated
434
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
13
Cargo.toml
13
Cargo.toml
|
@ -13,24 +13,24 @@ members = [
|
|||
]
|
||||
|
||||
[workspace.dependencies]
|
||||
prisma-client-rust = { git = "https://github.com/Brendonovich/prisma-client-rust", branch = "spacedrive", features = [
|
||||
prisma-client-rust = { git = "https://github.com/Brendonovich/prisma-client-rust", tag = "0.6.8", features = [
|
||||
"rspc",
|
||||
"sqlite-create-many",
|
||||
"migrations",
|
||||
"sqlite",
|
||||
], default-features = false }
|
||||
prisma-client-rust-cli = { git = "https://github.com/Brendonovich/prisma-client-rust", branch = "spacedrive", features = [
|
||||
prisma-client-rust-cli = { git = "https://github.com/Brendonovich/prisma-client-rust", tag = "0.6.8", features = [
|
||||
"rspc",
|
||||
"sqlite-create-many",
|
||||
"migrations",
|
||||
"sqlite",
|
||||
], default-features = false }
|
||||
prisma-client-rust-sdk = { git = "https://github.com/Brendonovich/prisma-client-rust", branch = "spacedrive", features = [
|
||||
prisma-client-rust-sdk = { git = "https://github.com/Brendonovich/prisma-client-rust", tag = "0.6.8", features = [
|
||||
"sqlite",
|
||||
], default-features = false }
|
||||
|
||||
rspc = { version = "0.1.2" }
|
||||
specta = { version = "0.0.6" }
|
||||
rspc = { version = "0.1.4" }
|
||||
specta = { version = "1.0.3" }
|
||||
httpz = { version = "0.0.3" }
|
||||
|
||||
swift-rs = { version = "1.0.1" }
|
||||
|
@ -45,6 +45,5 @@ if-watch = { git = "https://github.com/oscartbeaumont/if-watch", rev = "410e8e1d
|
|||
|
||||
mdns-sd = { git = "https://github.com/oscartbeaumont/mdns-sd", rev = "45515a98e9e408c102871abaa5a9bff3bee0cbe8" } # TODO: Do upstream PR
|
||||
|
||||
rspc = { git = "https://github.com/oscartbeaumont/rspc", rev = "c03872c0ba29d2429e9c059dfb235cdd03e15e8c" } # TODO: Move back to crates.io when new jsonrpc executor + `tokio::spawn` in the Tauri IPC plugin + upgraded Tauri version is released
|
||||
specta = { git = "https://github.com/oscartbeaumont/rspc", rev = "c03872c0ba29d2429e9c059dfb235cdd03e15e8c" }
|
||||
rspc = { git = "https://github.com/oscartbeaumont/rspc", rev = "9cb64def32aef5a987f9f06f727b4160c321b5f8" }
|
||||
httpz = { git = "https://github.com/oscartbeaumont/httpz", rev = "a5185f2ed2fdefeb2f582dce38a692a1bf76d1d6" }
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
<p align="center">
|
||||
<a href="#">
|
||||
|
||||
|
||||
</a>
|
||||
<p align="center">
|
||||
<img width="150" height="150" src="https://github.com/spacedriveapp/spacedrive/blob/main/packages/assets/images/AppLogo.png" alt="Logo">
|
||||
|
@ -28,7 +28,7 @@
|
|||
<i>~ Links will be added once a release is available. ~</i>
|
||||
</p>
|
||||
</p>
|
||||
Spacedrive is an open source cross-platform file manager, powered by a virtual distributed filesystem (<a href="#what-is-a-vdfs">VDFS</a>) written in Rust.
|
||||
Spacedrive is an open source cross-platform file manager, powered by a virtual distributed filesystem (<a href="#what-is-a-vdfs">VDFS</a>) written in Rust.
|
||||
<br/>
|
||||
<br/>
|
||||
|
||||
|
@ -83,7 +83,7 @@ This project is using what I'm calling the **"PRRTT"** stack (Prisma, Rust, Reac
|
|||
|
||||
- Prisma on the front-end? 🤯 Made possible thanks to [prisma-client-rust](https://github.com/brendonovich/prisma-client-rust), developed by [Brendonovich](https://github.com/brendonovich). Gives us access to the powerful migration CLI in development, along with the Prisma syntax for our schema. The application bundles with the Prisma query engine and codegen for a beautiful Rust API. Our lightweight migration runner is custom built for a desktop app context.
|
||||
- Tauri allows us to create a pure Rust native OS webview, without the overhead of your average Electron app. This brings the bundle size and average memory usage down dramatically. It also contributes to a more native feel, especially on macOS due to Safari's close integration with the OS.
|
||||
- We also use [rspc](https://rspc.otbeaumont.me) which allows us to define functions in Rust and call them on the Typescript frontend in a completely typesafe manner, so no unnecessary bugs make it into production!
|
||||
- We also use [rspc](https://rspc.dev) which allows us to define functions in Rust and call them on the Typescript frontend in a completely typesafe manner, so no unnecessary bugs make it into production!
|
||||
- The core (`sdcore`) is written in pure Rust.
|
||||
|
||||
## Monorepo structure:
|
||||
|
|
|
@ -20,3 +20,5 @@ openssl-sys = { version = "0.9.76", features = [
|
|||
] } # Override features of transitive dependencies to support IOS Simulator on M1
|
||||
futures = "0.3.24"
|
||||
tracing = "0.1.37"
|
||||
futures-channel = "0.3.28"
|
||||
futures-locks = "0.7.1"
|
||||
|
|
|
@ -1,15 +1,19 @@
|
|||
use futures::future::join_all;
|
||||
use futures::{future::join_all, StreamExt};
|
||||
use futures_channel::mpsc;
|
||||
use once_cell::sync::{Lazy, OnceCell};
|
||||
use rspc::internal::jsonrpc::*;
|
||||
use rspc::internal::jsonrpc::{self, *};
|
||||
use sd_core::{api::Router, Node};
|
||||
use serde_json::{from_str, from_value, to_string, Value};
|
||||
use std::{collections::HashMap, marker::Send, sync::Arc};
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
collections::HashMap,
|
||||
future::{ready, Ready},
|
||||
marker::Send,
|
||||
sync::Arc,
|
||||
};
|
||||
use tokio::{
|
||||
runtime::Runtime,
|
||||
sync::{
|
||||
mpsc::{unbounded_channel, UnboundedSender},
|
||||
oneshot, Mutex,
|
||||
},
|
||||
sync::{oneshot, Mutex},
|
||||
};
|
||||
use tracing::error;
|
||||
|
||||
|
@ -19,10 +23,38 @@ pub type NodeType = Lazy<Mutex<Option<(Arc<Node>, Arc<Router>)>>>;
|
|||
|
||||
pub static NODE: NodeType = Lazy::new(|| Mutex::new(None));
|
||||
|
||||
pub static SUBSCRIPTIONS: Lazy<Mutex<HashMap<RequestId, oneshot::Sender<()>>>> =
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub static SUBSCRIPTIONS: Lazy<Arc<futures_locks::Mutex<HashMap<RequestId, oneshot::Sender<()>>>>> =
|
||||
Lazy::new(Default::default);
|
||||
|
||||
pub static EVENT_SENDER: OnceCell<UnboundedSender<Response>> = OnceCell::new();
|
||||
pub static EVENT_SENDER: OnceCell<mpsc::Sender<Response>> = OnceCell::new();
|
||||
|
||||
pub struct MobileSender<'a> {
|
||||
resp: &'a mut Option<Response>,
|
||||
}
|
||||
|
||||
impl<'a> Sender<'a> for MobileSender<'a> {
|
||||
type SendFut = Ready<()>;
|
||||
type SubscriptionMap = Arc<futures_locks::Mutex<HashMap<RequestId, oneshot::Sender<()>>>>;
|
||||
type OwnedSender = OwnedMpscSender;
|
||||
|
||||
fn subscription(self) -> SubscriptionUpgrade<'a, Self> {
|
||||
SubscriptionUpgrade::Supported(
|
||||
OwnedMpscSender::new(
|
||||
EVENT_SENDER
|
||||
.get()
|
||||
.expect("Core was not started before making a request!")
|
||||
.clone(),
|
||||
),
|
||||
SUBSCRIPTIONS.clone(),
|
||||
)
|
||||
}
|
||||
|
||||
fn send(self, resp: jsonrpc::Response) -> Self::SendFut {
|
||||
*self.resp = Some(resp);
|
||||
ready(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_core_msg(
|
||||
query: String,
|
||||
|
@ -59,22 +91,15 @@ pub fn handle_core_msg(
|
|||
let node = node.clone();
|
||||
let router = router.clone();
|
||||
async move {
|
||||
let mut channel = EVENT_SENDER.get().unwrap().clone();
|
||||
let mut resp = Sender::ResponseAndChannel(None, &mut channel);
|
||||
|
||||
let mut resp = Option::<Response>::None;
|
||||
handle_json_rpc(
|
||||
node.clone(),
|
||||
request,
|
||||
&router,
|
||||
&mut resp,
|
||||
&mut SubscriptionMap::Mutex(&SUBSCRIPTIONS),
|
||||
Cow::Borrowed(&router),
|
||||
MobileSender { resp: &mut resp },
|
||||
)
|
||||
.await;
|
||||
|
||||
match resp {
|
||||
Sender::ResponseAndChannel(resp, _) => resp,
|
||||
_ => unreachable!(),
|
||||
}
|
||||
resp
|
||||
}
|
||||
}))
|
||||
.await;
|
||||
|
@ -87,11 +112,11 @@ pub fn handle_core_msg(
|
|||
}
|
||||
|
||||
pub fn spawn_core_event_listener(callback: impl Fn(String) + Send + 'static) {
|
||||
let (tx, mut rx) = unbounded_channel();
|
||||
let (tx, mut rx) = mpsc::channel(100);
|
||||
let _ = EVENT_SENDER.set(tx);
|
||||
|
||||
RUNTIME.spawn(async move {
|
||||
while let Some(event) = rx.recv().await {
|
||||
while let Some(event) = rx.next().await {
|
||||
let data = match to_string(&event) {
|
||||
Ok(json) => json,
|
||||
Err(err) => {
|
||||
|
|
|
@ -23,6 +23,7 @@ sync-messages = []
|
|||
sd-ffmpeg = { path = "../crates/ffmpeg", optional = true }
|
||||
sd-crypto = { path = "../crates/crypto", features = [
|
||||
"rspc",
|
||||
"specta",
|
||||
"serde",
|
||||
"keymanager",
|
||||
] }
|
||||
|
@ -30,7 +31,7 @@ sd-file-ext = { path = "../crates/file-ext" }
|
|||
sd-sync = { path = "../crates/sync" }
|
||||
sd-p2p = { path = "../crates/p2p", features = ["specta", "serde"] }
|
||||
|
||||
rspc = { workspace = true, features = ["uuid", "chrono", "tracing"] }
|
||||
rspc = { workspace = true, features = ["uuid", "chrono", "tracing", "unstable"] }
|
||||
httpz = { workspace = true }
|
||||
prisma-client-rust = { workspace = true }
|
||||
specta = { workspace = true }
|
||||
|
|
|
@ -9,8 +9,9 @@ use crate::{
|
|||
prisma::{location, object},
|
||||
};
|
||||
|
||||
use rspc::{ErrorCode, Type};
|
||||
use rspc::ErrorCode;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
use std::path::Path;
|
||||
use tokio::fs;
|
||||
|
||||
|
|
|
@ -8,8 +8,8 @@ use crate::{
|
|||
},
|
||||
};
|
||||
|
||||
use rspc::Type;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
use std::path::PathBuf;
|
||||
use uuid::Uuid;
|
||||
|
||||
|
|
|
@ -6,8 +6,8 @@ use crate::{
|
|||
};
|
||||
|
||||
use chrono::Utc;
|
||||
use rspc::Type;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
use tracing::debug;
|
||||
use uuid::Uuid;
|
||||
|
||||
|
|
|
@ -13,8 +13,9 @@ use std::{
|
|||
path::{PathBuf, MAIN_SEPARATOR, MAIN_SEPARATOR_STR},
|
||||
};
|
||||
|
||||
use rspc::{self, ErrorCode, RouterBuilderLike, Type};
|
||||
use rspc::{self, ErrorCode, RouterBuilderLike};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
|
||||
use super::{utils::LibraryRequest, Ctx, RouterBuilder};
|
||||
|
||||
|
@ -49,7 +50,7 @@ pub struct ExplorerData {
|
|||
file_path::include!(file_path_with_object { object });
|
||||
object::include!(object_with_file_paths { file_paths });
|
||||
|
||||
pub(crate) fn mount() -> impl RouterBuilderLike<Ctx> {
|
||||
pub(crate) fn mount() -> impl RouterBuilderLike<Ctx, Meta = ()> {
|
||||
<RouterBuilder>::new()
|
||||
.library_query("list", |t| {
|
||||
t(|_, _: (), library| async move {
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
use chrono::{DateTime, Utc};
|
||||
use prisma_client_rust::{operator::or, Direction};
|
||||
use rspc::{Config, Type};
|
||||
use rspc::Config;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::{
|
||||
|
@ -215,16 +216,16 @@ pub(crate) fn mount() -> Arc<Router> {
|
|||
Ok(items)
|
||||
})
|
||||
})
|
||||
.yolo_merge("library.", libraries::mount())
|
||||
.yolo_merge("volumes.", volumes::mount())
|
||||
.yolo_merge("tags.", tags::mount())
|
||||
.yolo_merge("keys.", keys::mount())
|
||||
.yolo_merge("locations.", locations::mount())
|
||||
.yolo_merge("files.", files::mount())
|
||||
.yolo_merge("jobs.", jobs::mount())
|
||||
.yolo_merge("p2p.", p2p::mount())
|
||||
.yolo_merge("sync.", sync::mount())
|
||||
.yolo_merge("invalidation.", utils::mount_invalidate())
|
||||
.merge("library.", libraries::mount())
|
||||
.merge("volumes.", volumes::mount())
|
||||
.merge("tags.", tags::mount())
|
||||
.merge("keys.", keys::mount())
|
||||
.merge("locations.", locations::mount())
|
||||
.merge("files.", files::mount())
|
||||
.merge("jobs.", jobs::mount())
|
||||
.merge("p2p.", p2p::mount())
|
||||
.merge("sync.", sync::mount())
|
||||
.merge("invalidation.", utils::mount_invalidate())
|
||||
.build()
|
||||
.arced();
|
||||
InvalidRequests::validate(r.clone()); // This validates all invalidation calls.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use rspc::Type;
|
||||
use sd_p2p::PeerId;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use crate::p2p::P2PEvent;
|
||||
|
|
|
@ -5,12 +5,9 @@ use super::{utils::LibraryRequest, RouterBuilder};
|
|||
pub fn mount() -> RouterBuilder {
|
||||
RouterBuilder::new()
|
||||
.library_subscription("newMessage", |t| {
|
||||
t(|ctx, _: (), library_id| {
|
||||
t(|_, _: (), library| {
|
||||
async_stream::stream! {
|
||||
let Some(lib) = ctx.library_manager.get_ctx(library_id).await else {
|
||||
return
|
||||
};
|
||||
let mut rx = lib.sync.tx.subscribe();
|
||||
let mut rx = library.sync.tx.subscribe();
|
||||
while let Ok(msg) = rx.recv().await {
|
||||
let op = match msg {
|
||||
SyncMessage::Ingested(op) => op,
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
use rspc::{ErrorCode, Type};
|
||||
use rspc::ErrorCode;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
|
||||
use serde_json::json;
|
||||
use tracing::info;
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
use crate::api::{CoreEvent, Router, RouterBuilder};
|
||||
|
||||
use async_stream::stream;
|
||||
use rspc::{internal::specta::DataType, Type};
|
||||
use serde::Serialize;
|
||||
use serde_hashkey::to_key;
|
||||
use serde_json::Value;
|
||||
use specta::{DataType, Type};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
|
@ -14,7 +14,7 @@ use std::{
|
|||
time::Duration,
|
||||
};
|
||||
use tokio::sync::broadcast;
|
||||
use tracing::warn;
|
||||
use tracing::{debug, warn};
|
||||
|
||||
#[cfg(debug_assertions)]
|
||||
use std::sync::Mutex;
|
||||
|
@ -218,7 +218,7 @@ macro_rules! invalidate_query {
|
|||
|
||||
pub fn mount_invalidate() -> RouterBuilder {
|
||||
let (tx, _) = broadcast::channel(100);
|
||||
let manager_thread_active = AtomicBool::new(false);
|
||||
let manager_thread_active = Arc::new(AtomicBool::new(false));
|
||||
|
||||
// TODO: Scope the invalidate queries to a specific library (filtered server side)
|
||||
RouterBuilder::new().subscription("listen", move |t| {
|
||||
|
@ -229,6 +229,7 @@ pub fn mount_invalidate() -> RouterBuilder {
|
|||
if !manager_thread_active.swap(true, Ordering::Relaxed) {
|
||||
let mut event_bus_rx = ctx.event_bus.0.subscribe();
|
||||
let tx = tx.clone();
|
||||
let manager_thread_active = manager_thread_active.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut buf = HashMap::with_capacity(100);
|
||||
|
||||
|
@ -249,8 +250,14 @@ pub fn mount_invalidate() -> RouterBuilder {
|
|||
let x = buf.drain().map(|(_k, v)| v).collect::<Vec<_>>();
|
||||
match tx.send(x) {
|
||||
Ok(_) => {},
|
||||
Err(_) => warn!("Error emitting invalidation manager events!"),
|
||||
// All receivers are shutdown means that all clients are disconnected.
|
||||
Err(_) => {
|
||||
debug!("Shutting down invalidation manager! This is normal if all clients disconnects.");
|
||||
manager_thread_active.swap(false, Ordering::Relaxed);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
use std::sync::Arc;
|
||||
use std::{borrow::Cow, marker::PhantomData, panic::Location, process, sync::Arc};
|
||||
|
||||
use futures::Stream;
|
||||
use rspc::{
|
||||
internal::{
|
||||
specta, BuiltProcedureBuilder, MiddlewareBuilderLike, RequestResult,
|
||||
BuiltProcedureBuilder, LayerResult, MiddlewareBuilderLike, ResolverLayer,
|
||||
UnbuiltProcedureBuilder,
|
||||
},
|
||||
ErrorCode, Type,
|
||||
is_invalid_procedure_name, typedef, ErrorCode, ExecError, RequestLayer, StreamRequestLayer,
|
||||
};
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{api::Ctx, library::Library};
|
||||
|
@ -22,77 +22,81 @@ pub(crate) struct LibraryArgs<T> {
|
|||
|
||||
// WARNING: This is system is using internal API's which means it will break between rspc release. I would avoid copying it unless you understand the cost of maintaining it!
|
||||
pub trait LibraryRequest {
|
||||
fn library_query<TUnbuiltResolver, TUnbuiltResult, TUnbuiltResultMarker, TBuiltResolver, TArg>(
|
||||
fn library_query<TResolver, TArg, TResult, TResultMarker>(
|
||||
self,
|
||||
key: &'static str,
|
||||
builder: impl FnOnce(
|
||||
UnbuiltProcedureBuilder<Ctx, TUnbuiltResolver>,
|
||||
) -> BuiltProcedureBuilder<TBuiltResolver>,
|
||||
UnbuiltProcedureBuilder<Ctx, TResolver>,
|
||||
) -> BuiltProcedureBuilder<TResolver>,
|
||||
) -> Self
|
||||
where
|
||||
TUnbuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send,
|
||||
TBuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send + Sync + 'static,
|
||||
TUnbuiltResult: RequestResult<TUnbuiltResultMarker> + Send,
|
||||
TArg: DeserializeOwned + specta::Type + Send + 'static;
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: RequestLayer<TResultMarker> + Send,
|
||||
TResolver: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static;
|
||||
|
||||
fn library_mutation<
|
||||
TUnbuiltResolver,
|
||||
TUnbuiltResult,
|
||||
TUnbuiltResultMarker,
|
||||
TBuiltResolver,
|
||||
TArg,
|
||||
>(
|
||||
fn library_mutation<TResolver, TArg, TResult, TResultMarker>(
|
||||
self,
|
||||
key: &'static str,
|
||||
builder: impl FnOnce(
|
||||
UnbuiltProcedureBuilder<Ctx, TUnbuiltResolver>,
|
||||
) -> BuiltProcedureBuilder<TBuiltResolver>,
|
||||
UnbuiltProcedureBuilder<Ctx, TResolver>,
|
||||
) -> BuiltProcedureBuilder<TResolver>,
|
||||
) -> Self
|
||||
where
|
||||
TUnbuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send,
|
||||
TBuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send + Sync + 'static,
|
||||
TUnbuiltResult: RequestResult<TUnbuiltResultMarker> + Send,
|
||||
TArg: DeserializeOwned + specta::Type + Send + 'static;
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: RequestLayer<TResultMarker> + Send,
|
||||
TResolver: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static;
|
||||
|
||||
fn library_subscription<TResolver, TArg, TStream, TResult>(
|
||||
fn library_subscription<F, TArg, TResult, TResultMarker>(
|
||||
self,
|
||||
key: &'static str,
|
||||
builder: impl Fn(UnbuiltProcedureBuilder<Ctx, TResolver>) -> BuiltProcedureBuilder<TResolver>,
|
||||
builder: impl FnOnce(UnbuiltProcedureBuilder<Ctx, F>) -> BuiltProcedureBuilder<F>,
|
||||
) -> Self
|
||||
where
|
||||
TArg: DeserializeOwned + specta::Type + 'static,
|
||||
TStream: Stream<Item = TResult> + Send + Sync + 'static,
|
||||
TResult: Serialize + specta::Type,
|
||||
TResolver: Fn(Ctx, TArg, Uuid) -> TStream + Send + Sync + 'static;
|
||||
F: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static,
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: StreamRequestLayer<TResultMarker> + Send;
|
||||
}
|
||||
|
||||
// Note: This will break with middleware context switching but that's fine for now
|
||||
impl<TMiddleware> LibraryRequest for rspc::RouterBuilder<Ctx, TMiddleware>
|
||||
impl<TMiddleware> LibraryRequest for rspc::RouterBuilder<Ctx, (), TMiddleware>
|
||||
where
|
||||
TMiddleware: MiddlewareBuilderLike<Ctx, LayerContext = Ctx> + Send + 'static,
|
||||
{
|
||||
fn library_query<TUnbuiltResolver, TUnbuiltResult, TUnbuiltResultMarker, TBuiltResolver, TArg>(
|
||||
self,
|
||||
fn library_query<TResolver, TArg, TResult, TResultMarker>(
|
||||
mut self,
|
||||
key: &'static str,
|
||||
builder: impl FnOnce(
|
||||
UnbuiltProcedureBuilder<Ctx, TUnbuiltResolver>,
|
||||
) -> BuiltProcedureBuilder<TBuiltResolver>,
|
||||
UnbuiltProcedureBuilder<Ctx, TResolver>,
|
||||
) -> BuiltProcedureBuilder<TResolver>,
|
||||
) -> Self
|
||||
where
|
||||
TUnbuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send,
|
||||
TBuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send + Sync + 'static,
|
||||
TUnbuiltResult: RequestResult<TUnbuiltResultMarker> + Send,
|
||||
TArg: DeserializeOwned + specta::Type + Send + 'static,
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: RequestLayer<TResultMarker> + Send,
|
||||
TResolver: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static,
|
||||
{
|
||||
self.query(key, move |t| {
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::from_builder(&t)).resolver);
|
||||
if is_invalid_procedure_name(key) {
|
||||
eprintln!(
|
||||
"{}: rspc error: attempted to attach a query with the key '{}', however this name is not allowed. ",
|
||||
Location::caller(),
|
||||
key
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
|
||||
t(move |ctx, arg: LibraryArgs<TArg>| {
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::default()).resolver);
|
||||
let ty =
|
||||
typedef::<LibraryArgs<TArg>, TResult::Result>(Cow::Borrowed(key), self.typ_store())
|
||||
.unwrap();
|
||||
let layer = self.prev_middleware().build(ResolverLayer {
|
||||
func: move |ctx: Ctx, input, _| {
|
||||
let resolver = resolver.clone();
|
||||
async move {
|
||||
Ok(LayerResult::FutureValueOrStream(Box::pin(async move {
|
||||
let args: LibraryArgs<TArg> =
|
||||
serde_json::from_value(input).map_err(ExecError::DeserializingArgErr)?;
|
||||
|
||||
let library = ctx
|
||||
.library_manager
|
||||
.get_ctx(arg.library_id)
|
||||
.get_ctx(args.library_id)
|
||||
.await
|
||||
.ok_or_else(|| {
|
||||
rspc::Error::new(
|
||||
|
@ -102,43 +106,53 @@ where
|
|||
)
|
||||
})?;
|
||||
|
||||
Ok(resolver(ctx, arg.arg, library)
|
||||
.into_request_future()?
|
||||
.exec()
|
||||
.await?)
|
||||
}
|
||||
})
|
||||
})
|
||||
resolver(ctx, args.arg, library)
|
||||
.into_layer_result()?
|
||||
.into_value_or_stream()
|
||||
.await
|
||||
})))
|
||||
},
|
||||
phantom: PhantomData,
|
||||
});
|
||||
self.queries().append(key.into(), layer, ty);
|
||||
self
|
||||
}
|
||||
|
||||
fn library_mutation<
|
||||
TUnbuiltResolver,
|
||||
TUnbuiltResult,
|
||||
TUnbuiltResultMarker,
|
||||
TBuiltResolver,
|
||||
TArg,
|
||||
>(
|
||||
self,
|
||||
fn library_mutation<TResolver, TArg, TResult, TResultMarker>(
|
||||
mut self,
|
||||
key: &'static str,
|
||||
builder: impl FnOnce(
|
||||
UnbuiltProcedureBuilder<Ctx, TUnbuiltResolver>,
|
||||
) -> BuiltProcedureBuilder<TBuiltResolver>,
|
||||
UnbuiltProcedureBuilder<Ctx, TResolver>,
|
||||
) -> BuiltProcedureBuilder<TResolver>,
|
||||
) -> Self
|
||||
where
|
||||
TUnbuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send,
|
||||
TBuiltResolver: Fn(Ctx, TArg, Library) -> TUnbuiltResult + Send + Sync + 'static,
|
||||
TUnbuiltResult: RequestResult<TUnbuiltResultMarker> + Send,
|
||||
TArg: DeserializeOwned + specta::Type + Send + 'static,
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: RequestLayer<TResultMarker> + Send,
|
||||
TResolver: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static,
|
||||
{
|
||||
self.mutation(key, move |t| {
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::from_builder(&t)).resolver);
|
||||
if is_invalid_procedure_name(key) {
|
||||
eprintln!(
|
||||
"{}: rspc error: attempted to attach a mutation with the key '{}', however this name is not allowed. ",
|
||||
Location::caller(),
|
||||
key
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
|
||||
t(move |ctx, arg: LibraryArgs<TArg>| {
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::default()).resolver);
|
||||
let ty =
|
||||
typedef::<LibraryArgs<TArg>, TResult::Result>(Cow::Borrowed(key), self.typ_store())
|
||||
.unwrap();
|
||||
let layer = self.prev_middleware().build(ResolverLayer {
|
||||
func: move |ctx: Ctx, input, _| {
|
||||
let resolver = resolver.clone();
|
||||
async move {
|
||||
Ok(LayerResult::FutureValueOrStream(Box::pin(async move {
|
||||
let args: LibraryArgs<TArg> =
|
||||
serde_json::from_value(input).map_err(ExecError::DeserializingArgErr)?;
|
||||
|
||||
let library = ctx
|
||||
.library_manager
|
||||
.get_ctx(arg.library_id)
|
||||
.get_ctx(args.library_id)
|
||||
.await
|
||||
.ok_or_else(|| {
|
||||
rspc::Error::new(
|
||||
|
@ -148,44 +162,69 @@ where
|
|||
)
|
||||
})?;
|
||||
|
||||
Ok(resolver(ctx, arg.arg, library)
|
||||
.into_request_future()?
|
||||
.exec()
|
||||
.await?)
|
||||
}
|
||||
})
|
||||
})
|
||||
resolver(ctx, args.arg, library)
|
||||
.into_layer_result()?
|
||||
.into_value_or_stream()
|
||||
.await
|
||||
})))
|
||||
},
|
||||
phantom: PhantomData,
|
||||
});
|
||||
self.mutations().append(key.into(), layer, ty);
|
||||
self
|
||||
}
|
||||
|
||||
fn library_subscription<TResolver, TArg, TStream, TResult>(
|
||||
self,
|
||||
fn library_subscription<F, TArg, TResult, TResultMarker>(
|
||||
mut self,
|
||||
key: &'static str,
|
||||
builder: impl Fn(UnbuiltProcedureBuilder<Ctx, TResolver>) -> BuiltProcedureBuilder<TResolver>,
|
||||
builder: impl FnOnce(UnbuiltProcedureBuilder<Ctx, F>) -> BuiltProcedureBuilder<F>,
|
||||
) -> Self
|
||||
where
|
||||
TArg: DeserializeOwned + specta::Type + 'static,
|
||||
TStream: Stream<Item = TResult> + Send + Sync + 'static,
|
||||
TResult: Serialize + specta::Type,
|
||||
TResolver: Fn(Ctx, TArg, Uuid) -> TStream + Send + Sync + 'static,
|
||||
F: Fn(Ctx, TArg, Library) -> TResult + Send + Sync + 'static,
|
||||
TArg: DeserializeOwned + Type + Send + 'static,
|
||||
TResult: StreamRequestLayer<TResultMarker> + Send,
|
||||
{
|
||||
self.subscription(key, |t| {
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::from_builder(&t)).resolver);
|
||||
if is_invalid_procedure_name(key) {
|
||||
eprintln!(
|
||||
"{}: rspc error: attempted to attach a subscription with the key '{}', however this name is not allowed. ",
|
||||
Location::caller(),
|
||||
key
|
||||
);
|
||||
process::exit(1);
|
||||
}
|
||||
|
||||
t(move |ctx, arg: LibraryArgs<TArg>| {
|
||||
// TODO(@Oscar): Upstream rspc work to allow this to work
|
||||
// let library = ctx
|
||||
// .library_manager
|
||||
// .get_ctx(arg.library_id)
|
||||
// .await
|
||||
// .ok_or_else(|| {
|
||||
// rspc::Error::new(
|
||||
// ErrorCode::BadRequest,
|
||||
// "You must specify a valid library to use this operation.".to_string(),
|
||||
// )
|
||||
// })?;
|
||||
let resolver = Arc::new(builder(UnbuiltProcedureBuilder::default()).resolver);
|
||||
let ty =
|
||||
typedef::<LibraryArgs<TArg>, TResult::Result>(Cow::Borrowed(key), self.typ_store())
|
||||
.unwrap();
|
||||
let layer = self.prev_middleware().build(ResolverLayer {
|
||||
func: move |ctx: Ctx, input, _| {
|
||||
let resolver = resolver.clone();
|
||||
Ok(LayerResult::FutureValueOrStream(Box::pin(async move {
|
||||
let args: LibraryArgs<TArg> =
|
||||
serde_json::from_value(input).map_err(ExecError::DeserializingArgErr)?;
|
||||
|
||||
resolver(ctx, arg.arg, arg.library_id)
|
||||
})
|
||||
})
|
||||
let library = ctx
|
||||
.library_manager
|
||||
.get_ctx(args.library_id)
|
||||
.await
|
||||
.ok_or_else(|| {
|
||||
rspc::Error::new(
|
||||
ErrorCode::BadRequest,
|
||||
"You must specify a valid library to use this operation."
|
||||
.to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
resolver(ctx, args.arg, library)
|
||||
.into_layer_result()?
|
||||
.into_value_or_stream()
|
||||
.await
|
||||
})))
|
||||
},
|
||||
phantom: PhantomData,
|
||||
});
|
||||
self.subscriptions().append(key.into(), layer, ty);
|
||||
self
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,8 +31,8 @@ use std::{
|
|||
|
||||
use chrono::{DateTime, Utc};
|
||||
use prisma_client_rust::Direction;
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use thiserror::Error;
|
||||
use tokio::{
|
||||
sync::{broadcast, mpsc, Mutex, RwLock},
|
||||
|
|
|
@ -26,6 +26,7 @@ pub(crate) mod sync;
|
|||
pub(crate) mod util;
|
||||
pub(crate) mod volume;
|
||||
|
||||
#[allow(warnings, unused)]
|
||||
pub(crate) mod prisma;
|
||||
pub(crate) mod prisma_sync;
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use std::{marker::PhantomData, path::PathBuf};
|
||||
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{migrations, util::migrator::FileMigrator};
|
||||
|
|
|
@ -7,8 +7,8 @@ use crate::{
|
|||
use chrono::{DateTime, Utc};
|
||||
use globset::{Glob, GlobSet, GlobSetBuilder};
|
||||
use rmp_serde;
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use std::{collections::HashSet, path::Path};
|
||||
use tokio::fs;
|
||||
|
||||
|
|
|
@ -23,9 +23,9 @@ use std::{
|
|||
use futures::future::TryFutureExt;
|
||||
use normpath::PathExt;
|
||||
use prisma_client_rust::QueryError;
|
||||
use rspc::Type;
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use specta::Type;
|
||||
use tokio::{fs, io};
|
||||
use tracing::{debug, info};
|
||||
use uuid::Uuid;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use rspc::Type;
|
||||
use sd_p2p::Keypair;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use std::{
|
||||
marker::PhantomData,
|
||||
path::{Path, PathBuf},
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
use crate::{prisma::node, NodeError};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use uuid::Uuid;
|
||||
|
||||
mod config;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#![allow(dead_code, unused_variables)] // TODO: Reenable once this is working
|
||||
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
|
||||
pub enum PeerRequest {
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use crate::prisma::{file_path, object};
|
||||
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
|
||||
pub mod cas;
|
||||
pub mod file_identifier;
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use prisma_client_rust::QueryError;
|
||||
use rspc::Type;
|
||||
use serde::Deserialize;
|
||||
use specta::Type;
|
||||
|
||||
use uuid::Uuid;
|
||||
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
use std::{path::PathBuf, sync::Arc, time::Instant};
|
||||
|
||||
use rspc::Type;
|
||||
use sd_p2p::{
|
||||
spaceblock::{BlockSize, TransferRequest},
|
||||
Event, Manager, MetadataManager, PeerId,
|
||||
};
|
||||
use sd_sync::CRDTOperation;
|
||||
use serde::Serialize;
|
||||
use specta::Type;
|
||||
use tokio::{
|
||||
fs::File,
|
||||
io::{AsyncReadExt, AsyncWriteExt, BufReader},
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
use std::{collections::HashMap, env, str::FromStr};
|
||||
|
||||
use rspc::Type;
|
||||
use sd_p2p::Metadata;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use specta::Type;
|
||||
|
||||
#[derive(Debug, Clone, Type, Serialize, Deserialize)]
|
||||
pub struct PeerMetadata {
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
use crate::{library::Library, prisma::volume::*};
|
||||
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{serde_as, DisplayFromStr};
|
||||
use specta::Type;
|
||||
use std::process::Command;
|
||||
use sysinfo::{DiskExt, System, SystemExt};
|
||||
use thiserror::Error;
|
||||
|
|
|
@ -8,7 +8,8 @@ edition = "2021"
|
|||
rust-version = "1.67.0"
|
||||
|
||||
[features]
|
||||
rspc = ["dep:rspc"]
|
||||
rspc = ["dep:rspc", "dep:specta"]
|
||||
specta = ["dep:specta"]
|
||||
serde = ["dep:serde", "dep:serde_json", "dep:serde-big-array", "uuid/serde"]
|
||||
keymanager = ["dep:dashmap", "os-keyrings"]
|
||||
os-keyrings = ["dep:secret-service", "dep:security-framework"]
|
||||
|
@ -46,7 +47,8 @@ uuid = { version = "1.1.2", features = ["v4"] }
|
|||
dashmap = { version = "5.4.0", optional = true }
|
||||
|
||||
# optional, for support with rspc
|
||||
rspc = { workspace = true, features = ["uuid"], optional = true }
|
||||
rspc = { workspace = true, features = [], optional = true }
|
||||
specta = { workspace = true, features = ["uuid"], optional = true }
|
||||
|
||||
# for asynchronous crypto
|
||||
tokio = { workspace = true, features = ["io-util", "rt-multi-thread", "sync"] }
|
||||
|
|
|
@ -62,7 +62,7 @@ use super::keyring::{Identifier, KeyringInterface};
|
|||
/// It contains no sensitive information that is not encrypted.
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub struct StoredKey {
|
||||
pub uuid: Uuid, // uuid for identification. shared with mounted keys
|
||||
pub version: StoredKeyVersion,
|
||||
|
@ -82,7 +82,7 @@ pub struct StoredKey {
|
|||
/// This denotes the type of key. `Root` keys can be used to unlock the key manager, and `User` keys are ordinary keys.
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum StoredKeyType {
|
||||
User,
|
||||
Root,
|
||||
|
@ -91,7 +91,7 @@ pub enum StoredKeyType {
|
|||
/// This denotes the `StoredKey` version.
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum StoredKeyVersion {
|
||||
V1,
|
||||
}
|
||||
|
|
|
@ -31,41 +31,39 @@
|
|||
use std::{fmt::Debug, mem::swap};
|
||||
use zeroize::Zeroize;
|
||||
#[derive(Clone)]
|
||||
pub struct Protected<T>
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
|
||||
#[cfg_attr(any(feature = "specta", feature = "serde"), serde(transparent))]
|
||||
pub struct Protected<T>(T)
|
||||
where
|
||||
T: Zeroize,
|
||||
{
|
||||
data: T,
|
||||
}
|
||||
T: Zeroize;
|
||||
|
||||
impl<T> Protected<T>
|
||||
where
|
||||
T: Zeroize,
|
||||
{
|
||||
pub const fn new(value: T) -> Self {
|
||||
Self { data: value }
|
||||
Self(value)
|
||||
}
|
||||
|
||||
pub const fn expose(&self) -> &T {
|
||||
&self.data
|
||||
&self.0
|
||||
}
|
||||
|
||||
pub fn zeroize(mut self) {
|
||||
self.data.zeroize();
|
||||
self.0.zeroize();
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for Protected<Vec<u8>> {
|
||||
fn from(value: Vec<u8>) -> Self {
|
||||
Self { data: value }
|
||||
Self(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Protected<String>> for Protected<Vec<u8>> {
|
||||
fn from(value: Protected<String>) -> Self {
|
||||
Self {
|
||||
data: value.expose().as_bytes().to_vec(),
|
||||
}
|
||||
Self(value.expose().as_bytes().to_vec())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -75,7 +73,7 @@ where
|
|||
{
|
||||
pub fn into_inner(mut self) -> T {
|
||||
let mut out = Default::default();
|
||||
swap(&mut self.data, &mut out);
|
||||
swap(&mut self.0, &mut out);
|
||||
out
|
||||
}
|
||||
}
|
||||
|
@ -85,7 +83,7 @@ where
|
|||
T: Zeroize,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
self.data.zeroize();
|
||||
self.0.zeroize();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -97,41 +95,3 @@ where
|
|||
f.write_str("[REDACTED]")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
impl<'de, T> serde::Deserialize<'de> for Protected<T>
|
||||
where
|
||||
T: serde::Deserialize<'de> + Zeroize,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
Ok(Self::new(T::deserialize(deserializer)?))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "rspc")]
|
||||
use rspc::internal::specta;
|
||||
|
||||
#[cfg(feature = "rspc")]
|
||||
impl<T> specta::Type for Protected<T>
|
||||
where
|
||||
T: specta::Type + Zeroize,
|
||||
{
|
||||
const NAME: &'static str = T::NAME;
|
||||
const SID: specta::TypeSid = specta::sid!();
|
||||
const IMPL_LOCATION: specta::ImplLocation = specta::impl_location!();
|
||||
|
||||
fn inline(opts: specta::DefOpts, generics: &[specta::DataType]) -> specta::DataType {
|
||||
T::inline(opts, generics)
|
||||
}
|
||||
|
||||
fn reference(opts: specta::DefOpts, generics: &[specta::DataType]) -> specta::DataType {
|
||||
T::reference(opts, generics)
|
||||
}
|
||||
|
||||
fn definition(opts: specta::DefOpts) -> specta::DataTypeExt {
|
||||
T::definition(opts)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ use serde_big_array::BigArray;
|
|||
/// You may also generate a nonce for a given algorithm with `Nonce::generate()`
|
||||
#[derive(Clone, Copy, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum Nonce {
|
||||
XChaCha20Poly1305([u8; 20]),
|
||||
Aes256Gcm([u8; 8]),
|
||||
|
@ -32,7 +32,7 @@ pub enum Nonce {
|
|||
derive(serde::Serialize),
|
||||
derive(serde::Deserialize)
|
||||
)]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum Params {
|
||||
Standard,
|
||||
Hardened,
|
||||
|
@ -47,7 +47,7 @@ pub enum Params {
|
|||
derive(serde::Deserialize),
|
||||
serde(tag = "name", content = "params")
|
||||
)]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum HashingAlgorithm {
|
||||
Argon2id(Params),
|
||||
BalloonBlake3(Params),
|
||||
|
@ -127,7 +127,7 @@ impl Deref for Nonce {
|
|||
derive(serde::Serialize),
|
||||
derive(serde::Deserialize)
|
||||
)]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub enum Algorithm {
|
||||
XChaCha20Poly1305,
|
||||
Aes256Gcm,
|
||||
|
@ -300,7 +300,7 @@ impl From<SecretKeyString> for SecretKey {
|
|||
/// This is always `ENCRYPTED_KEY_LEN` (which is `KEY_LEM` + `AEAD_TAG_LEN`)
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub struct EncryptedKey(
|
||||
#[cfg_attr(feature = "serde", serde(with = "BigArray"))] // salt used for file data
|
||||
pub [u8; ENCRYPTED_KEY_LEN],
|
||||
|
@ -327,7 +327,7 @@ impl TryFrom<Vec<u8>> for EncryptedKey {
|
|||
/// You may also generate a salt with `Salt::generate()`
|
||||
#[derive(Clone, PartialEq, Eq, Copy)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub struct Salt(pub [u8; SALT_LEN]);
|
||||
|
||||
impl Salt {
|
||||
|
@ -357,7 +357,7 @@ impl TryFrom<Vec<u8>> for Salt {
|
|||
|
||||
#[derive(Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Deserialize))]
|
||||
#[cfg_attr(feature = "rspc", derive(rspc::Type))]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
pub struct OnboardingConfig {
|
||||
pub password: Protected<String>,
|
||||
pub algorithm: Algorithm,
|
||||
|
|
|
@ -1,9 +1,10 @@
|
|||
use std::{fmt::Display, str::FromStr};
|
||||
|
||||
#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
|
||||
#[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "serde", serde(transparent))]
|
||||
pub struct PeerId(pub(crate) libp2p::PeerId);
|
||||
#[cfg_attr(any(feature = "specta", feature = "serde"), serde(transparent))]
|
||||
pub struct PeerId(#[specta(type = String)] pub(crate) libp2p::PeerId);
|
||||
|
||||
impl FromStr for PeerId {
|
||||
type Err = libp2p::core::ParseError;
|
||||
|
@ -18,25 +19,3 @@ impl Display for PeerId {
|
|||
write!(f, "{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Replace this with transparent when the new Specta release is merged
|
||||
// TODO: #[cfg_attr(feature = "specta", derive(specta::Type))]
|
||||
// TODO: pub struct PeerId(#[cfg_attr(feature = "specta", specta(type = String))] pub(crate) libp2p::PeerId);
|
||||
#[cfg(feature = "specta")]
|
||||
impl specta::Type for PeerId {
|
||||
const NAME: &'static str = "PeerId";
|
||||
const SID: specta::TypeSid = specta::sid!();
|
||||
const IMPL_LOCATION: specta::ImplLocation = specta::impl_location!();
|
||||
|
||||
fn inline(opts: specta::DefOpts, generics: &[specta::DataType]) -> specta::DataType {
|
||||
<String as specta::Type>::inline(opts, generics)
|
||||
}
|
||||
|
||||
fn reference(opts: specta::DefOpts, generics: &[specta::DataType]) -> specta::DataType {
|
||||
<String as specta::Type>::reference(opts, generics)
|
||||
}
|
||||
|
||||
fn definition(opts: specta::DefOpts) -> specta::DataTypeExt {
|
||||
<String as specta::Type>::definition(opts)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -114,11 +114,11 @@ impl PrismaGenerator for SDSyncGenerator {
|
|||
|
||||
let typ = match field {
|
||||
dml::Field::ScalarField(_) => {
|
||||
field.type_tokens(quote!(self))
|
||||
field.type_tokens("e!(self))
|
||||
},
|
||||
dml::Field::RelationField(relation)=> {
|
||||
let relation_model_name_snake = snake_ident(&relation.relation_info.referenced_model);
|
||||
quote!(super::#relation_model_name_snake::SyncId)
|
||||
Some(quote!(super::#relation_model_name_snake::SyncId))
|
||||
},
|
||||
_ => return None
|
||||
};
|
||||
|
|
|
@ -2,11 +2,10 @@
|
|||
name = "sd-sync"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
rand = "0.8.5"
|
||||
rspc = { workspace = true, features = ["uuid", "uhlc"] }
|
||||
specta = { workspace = true, features = ["uuid", "uhlc"] }
|
||||
serde = "1.0.145"
|
||||
serde_json = "1.0.85"
|
||||
uhlc = "0.5.1"
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
use std::{collections::BTreeMap, fmt::Debug};
|
||||
|
||||
use rspc::Type;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
use specta::Type;
|
||||
use uhlc::NTP64;
|
||||
use uuid::Uuid;
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
export type Procedures = {
|
||||
queries:
|
||||
{ key: "buildInfo", input: never, result: BuildInfo } |
|
||||
{ key: "files.get", input: LibraryArgs<GetArgs>, result: { id: number, pub_id: number[], kind: number, key_id: number | null, hidden: boolean, favorite: boolean, important: boolean, has_thumbnail: boolean, has_thumbstrip: boolean, has_video_preview: boolean, ipfs_id: string | null, note: string | null, date_created: string, file_paths: FilePath[], media_data: MediaData | null } | null } |
|
||||
{ key: "files.get", input: LibraryArgs<GetArgs>, result: { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; file_paths: FilePath[]; media_data: MediaData | null } | null } |
|
||||
{ key: "jobs.getHistory", input: LibraryArgs<null>, result: JobReport[] } |
|
||||
{ key: "jobs.getRunning", input: LibraryArgs<null>, result: JobReport[] } |
|
||||
{ key: "keys.getDefault", input: LibraryArgs<null>, result: string | null } |
|
||||
|
@ -17,14 +17,14 @@ export type Procedures = {
|
|||
{ key: "keys.listMounted", input: LibraryArgs<null>, result: string[] } |
|
||||
{ key: "library.getStatistics", input: LibraryArgs<null>, result: Statistics } |
|
||||
{ key: "library.list", input: never, result: LibraryConfigWrapped[] } |
|
||||
{ key: "locations.getById", input: LibraryArgs<number>, result: location_with_indexer_rules | null } |
|
||||
{ key: "locations.getById", input: LibraryArgs<number>, result: LocationWithIndexerRules | null } |
|
||||
{ key: "locations.getExplorerData", input: LibraryArgs<LocationExplorerArgs>, result: ExplorerData } |
|
||||
{ key: "locations.indexer_rules.get", input: LibraryArgs<number>, result: IndexerRule } |
|
||||
{ key: "locations.indexer_rules.list", input: LibraryArgs<null>, result: IndexerRule[] } |
|
||||
{ key: "locations.indexer_rules.listForLocation", input: LibraryArgs<number>, result: IndexerRule[] } |
|
||||
{ key: "locations.list", input: LibraryArgs<null>, result: { id: number, pub_id: number[], node_id: number, name: string, path: string, total_capacity: number | null, available_capacity: number | null, is_archived: boolean, generate_preview_media: boolean, sync_preview_media: boolean, hidden: boolean, date_created: string, node: Node }[] } |
|
||||
{ key: "locations.list", input: LibraryArgs<null>, result: ({ id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string; node: Node })[] } |
|
||||
{ key: "nodeState", input: never, result: NodeState } |
|
||||
{ key: "search", input: LibraryArgs<{ locationId?: number, afterFileId?: [number, number], take?: number, order?: Ordering, search?: string, extension?: string, kind?: number, tags?: number[], createdAtFrom?: string, createdAtTo?: string, path?: string }>, result: ExplorerItem[] } |
|
||||
{ key: "search", input: LibraryArgs<{ locationId?: number | null; afterFileId?: [number, number] | null; take?: number | null; order?: Ordering | null; search?: string | null; extension?: string | null; kind?: number | null; tags?: number[] | null; createdAtFrom?: string | null; createdAtTo?: string | null; path?: string | null }>, result: ExplorerItem[] } |
|
||||
{ key: "sync.messages", input: LibraryArgs<null>, result: CRDTOperation[] } |
|
||||
{ key: "tags.get", input: LibraryArgs<number>, result: Tag | null } |
|
||||
{ key: "tags.getExplorerData", input: LibraryArgs<number>, result: ExplorerData } |
|
||||
|
@ -87,234 +87,238 @@ export type Procedures = {
|
|||
{ key: "sync.newMessage", input: LibraryArgs<null>, result: CRDTOperation }
|
||||
};
|
||||
|
||||
/**
|
||||
* These are all possible algorithms that can be used for encryption and decryption
|
||||
*/
|
||||
export type Algorithm = "XChaCha20Poly1305" | "Aes256Gcm"
|
||||
|
||||
export type AutomountUpdateArgs = { uuid: string, status: boolean }
|
||||
|
||||
export type BuildInfo = { version: string, commit: string }
|
||||
|
||||
export type CRDTOperation = { node: string, timestamp: number, id: string, typ: CRDTOperationType }
|
||||
|
||||
export type CRDTOperationType = SharedOperation | RelationOperation | OwnedOperation
|
||||
|
||||
export type CreateLibraryArgs = { name: string }
|
||||
|
||||
export type EditLibraryArgs = { id: string, name: string | null, description: string | null }
|
||||
|
||||
/**
|
||||
* This should be used for passing an encrypted key around.
|
||||
*
|
||||
* This is always `ENCRYPTED_KEY_LEN` (which is `KEY_LEM` + `AEAD_TAG_LEN`)
|
||||
*/
|
||||
export type EncryptedKey = number[]
|
||||
|
||||
export type ExplorerContext = ({ type: "Location" } & Location) | ({ type: "Tag" } & Tag)
|
||||
|
||||
export type ExplorerData = { context: ExplorerContext, items: ExplorerItem[] }
|
||||
|
||||
export type ExplorerItem = { type: "Path", has_thumbnail: boolean, item: file_path_with_object } | { type: "Object", has_thumbnail: boolean, item: object_with_file_paths }
|
||||
|
||||
export type FileCopierJobInit = { source_location_id: number, source_path_id: number, target_location_id: number, target_path: string, target_file_name_suffix: string | null }
|
||||
|
||||
export type FileCutterJobInit = { source_location_id: number, source_path_id: number, target_location_id: number, target_path: string }
|
||||
|
||||
export type FileDecryptorJobInit = { location_id: number, path_id: number, mount_associated_key: boolean, output_path: string | null, password: string | null, save_to_library: boolean | null }
|
||||
|
||||
export type FileDeleterJobInit = { location_id: number, path_id: number }
|
||||
|
||||
export type FileEncryptorJobInit = { location_id: number, path_id: number, key_uuid: string, algorithm: Algorithm, metadata: boolean, preview_media: boolean, output_path: string | null }
|
||||
|
||||
export type FileEraserJobInit = { location_id: number, path_id: number, passes: string }
|
||||
|
||||
export type FilePath = { id: number, is_dir: boolean, cas_id: string | null, integrity_checksum: string | null, location_id: number, materialized_path: string, name: string, extension: string, size_in_bytes: string, inode: number[], device: number[], object_id: number | null, parent_id: number | null, key_id: number | null, date_created: string, date_modified: string, date_indexed: string }
|
||||
|
||||
export type GenerateThumbsForLocationArgs = { id: number, path: string }
|
||||
|
||||
export type GetArgs = { id: number }
|
||||
|
||||
/**
|
||||
* This defines all available password hashing algorithms.
|
||||
*/
|
||||
export type HashingAlgorithm = { name: "Argon2id", params: Params } | { name: "BalloonBlake3", params: Params }
|
||||
|
||||
export type IdentifyUniqueFilesArgs = { id: number, path: string }
|
||||
|
||||
export type IndexerRule = { id: number, kind: number, name: string, default: boolean, parameters: number[], date_created: string, date_modified: string }
|
||||
|
||||
/**
|
||||
* `IndexerRuleCreateArgs` is the argument received from the client using rspc to create a new indexer rule.
|
||||
* Note that `parameters` field **MUST** be a JSON object serialized to bytes.
|
||||
*
|
||||
* In case of `RuleKind::AcceptFilesByGlob` or `RuleKind::RejectFilesByGlob`, it will be a
|
||||
* single string containing a glob pattern.
|
||||
*
|
||||
* In case of `RuleKind::AcceptIfChildrenDirectoriesArePresent` or `RuleKind::RejectIfChildrenDirectoriesArePresent` the
|
||||
* `parameters` field must be a vector of strings containing the names of the directories.
|
||||
*/
|
||||
export type IndexerRuleCreateArgs = { kind: RuleKind, name: string, parameters: string[] }
|
||||
|
||||
export type InvalidateOperationEvent = { key: string, arg: any, result: any | null }
|
||||
|
||||
export type JobReport = { id: string, name: string, action: string | null, data: number[] | null, metadata: any | null, is_background: boolean, created_at: string | null, started_at: string | null, completed_at: string | null, parent_id: string | null, status: JobStatus, task_count: number, completed_task_count: number, message: string }
|
||||
|
||||
export type JobStatus = "Queued" | "Running" | "Completed" | "Canceled" | "Failed" | "Paused"
|
||||
|
||||
export type KeyAddArgs = { algorithm: Algorithm, hashing_algorithm: HashingAlgorithm, key: string, library_sync: boolean, automount: boolean }
|
||||
|
||||
/**
|
||||
* Can wrap a query argument to require it to contain a `library_id` and provide helpers for working with libraries.
|
||||
*/
|
||||
export type LibraryArgs<T> = { library_id: string, arg: T }
|
||||
|
||||
/**
|
||||
* LibraryConfig holds the configuration for a specific library. This is stored as a '{uuid}.sdlibrary' file.
|
||||
*/
|
||||
export type LibraryConfig = { name: string, description: string }
|
||||
|
||||
export type LibraryConfigWrapped = { uuid: string, config: LibraryConfig }
|
||||
|
||||
export type LightScanArgs = { location_id: number, sub_path: string }
|
||||
|
||||
export type Location = { id: number, pub_id: number[], node_id: number, name: string, path: string, total_capacity: number | null, available_capacity: number | null, is_archived: boolean, generate_preview_media: boolean, sync_preview_media: boolean, hidden: boolean, date_created: string }
|
||||
|
||||
/**
|
||||
* `LocationCreateArgs` is the argument received from the client using `rspc` to create a new location.
|
||||
* It has the actual path and a vector of indexer rules ids, to create many-to-many relationships
|
||||
* between the location and indexer rules.
|
||||
*/
|
||||
export type LocationCreateArgs = { path: string, indexer_rules_ids: number[] }
|
||||
|
||||
export type LocationExplorerArgs = { location_id: number, path: string | null, limit: number, cursor: string | null, kind: number[] | null }
|
||||
|
||||
/**
|
||||
* `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
|
||||
* It contains the id of the location to be updated, possible a name to change the current location's name
|
||||
* and a vector of indexer rules ids to add or remove from the location.
|
||||
*
|
||||
* It is important to note that only the indexer rule ids in this vector will be used from now on.
|
||||
* Old rules that aren't in this vector will be purged.
|
||||
*/
|
||||
export type LocationUpdateArgs = { id: number, name: string | null, generate_preview_media: boolean | null, sync_preview_media: boolean | null, hidden: boolean | null, indexer_rules_ids: number[] }
|
||||
|
||||
export type MasterPasswordChangeArgs = { password: string, algorithm: Algorithm, hashing_algorithm: HashingAlgorithm }
|
||||
|
||||
export type MediaData = { id: number, pixel_width: number | null, pixel_height: number | null, longitude: number | null, latitude: number | null, fps: number | null, capture_device_make: string | null, capture_device_model: string | null, capture_device_software: string | null, duration_seconds: number | null, codecs: string | null, streams: number | null }
|
||||
|
||||
export type Node = { id: number, pub_id: number[], name: string, platform: number, version: string | null, last_seen: string, timezone: string | null, date_created: string }
|
||||
|
||||
/**
|
||||
* NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
|
||||
*/
|
||||
export type NodeConfig = { id: string, name: string, p2p_port: number | null, p2p_email: string | null, p2p_img_url: string | null }
|
||||
|
||||
export type NodeState = ({ id: string, name: string, p2p_port: number | null, p2p_email: string | null, p2p_img_url: string | null }) & { data_path: string }
|
||||
|
||||
/**
|
||||
* This should be used for providing a nonce to encrypt/decrypt functions.
|
||||
*
|
||||
* You may also generate a nonce for a given algorithm with `Nonce::generate()`
|
||||
*/
|
||||
export type Nonce = { XChaCha20Poly1305: number[] } | { Aes256Gcm: number[] }
|
||||
|
||||
export type Object = { id: number, pub_id: number[], kind: number, key_id: number | null, hidden: boolean, favorite: boolean, important: boolean, has_thumbnail: boolean, has_thumbstrip: boolean, has_video_preview: boolean, ipfs_id: string | null, note: string | null, date_created: string }
|
||||
|
||||
export type ObjectValidatorArgs = { id: number, path: string }
|
||||
|
||||
export type OnboardingConfig = { password: string, algorithm: Algorithm, hashing_algorithm: HashingAlgorithm }
|
||||
|
||||
/**
|
||||
* Represents the operating system which the remote peer is running.
|
||||
* This is not used internally and predominantly is designed to be used for display purposes by the embedding application.
|
||||
*/
|
||||
export type OperatingSystem = "Windows" | "Linux" | "MacOS" | "Ios" | "Android" | { Other: string }
|
||||
|
||||
export type Ordering = { name: boolean }
|
||||
|
||||
export type OwnedOperation = { model: string, items: OwnedOperationItem[] }
|
||||
|
||||
export type OwnedOperationData = { Create: { [key: string]: any } } | { CreateMany: { values: [any, { [key: string]: any }][], skip_duplicates: boolean } } | { Update: { [key: string]: any } } | "Delete"
|
||||
|
||||
export type OwnedOperationItem = { id: any, data: OwnedOperationData }
|
||||
|
||||
/**
|
||||
* TODO: P2P event for the frontend
|
||||
*/
|
||||
export type P2PEvent = { type: "DiscoveredPeer", peer_id: string, metadata: PeerMetadata } | { type: "SyncOperation", library_id: string, operations: CRDTOperation[] }
|
||||
|
||||
/**
|
||||
* These parameters define the password-hashing level.
|
||||
*
|
||||
* The greater the parameter, the longer the password will take to hash.
|
||||
*/
|
||||
export type Params = "Standard" | "Hardened" | "Paranoid"
|
||||
|
||||
export type PeerMetadata = { name: string, operating_system: OperatingSystem | null, version: string | null, email: string | null, img_url: string | null }
|
||||
|
||||
export type RelationOperation = { relation_item: string, relation_group: string, relation: string, data: RelationOperationData }
|
||||
|
||||
export type RelationOperationData = "Create" | { Update: { field: string, value: any } } | "Delete"
|
||||
|
||||
export type RenameFileArgs = { location_id: number, file_name: string, new_file_name: string }
|
||||
|
||||
export type RestoreBackupArgs = { password: string, secret_key: string, path: string }
|
||||
export type EditLibraryArgs = { id: string; name: string | null; description: string | null }
|
||||
|
||||
export type RuleKind = "AcceptFilesByGlob" | "RejectFilesByGlob" | "AcceptIfChildrenDirectoriesArePresent" | "RejectIfChildrenDirectoriesArePresent"
|
||||
|
||||
export type PeerMetadata = { name: string; operating_system: OperatingSystem | null; version: string | null; email: string | null; img_url: string | null }
|
||||
|
||||
/**
|
||||
* This should be used for passing a salt around.
|
||||
* `LocationUpdateArgs` is the argument received from the client using `rspc` to update a location.
|
||||
* It contains the id of the location to be updated, possible a name to change the current location's name
|
||||
* and a vector of indexer rules ids to add or remove from the location.
|
||||
*
|
||||
* You may also generate a salt with `Salt::generate()`
|
||||
* It is important to note that only the indexer rule ids in this vector will be used from now on.
|
||||
* Old rules that aren't in this vector will be purged.
|
||||
*/
|
||||
export type Salt = number[]
|
||||
|
||||
export type SetFavoriteArgs = { id: number, favorite: boolean }
|
||||
|
||||
export type SetNoteArgs = { id: number, note: string | null }
|
||||
|
||||
export type SharedOperation = { record_id: any, model: string, data: SharedOperationData }
|
||||
|
||||
export type SharedOperationCreateData = { u: { [key: string]: any } } | "a"
|
||||
|
||||
export type SharedOperationData = SharedOperationCreateData | { field: string, value: any } | null
|
||||
|
||||
export type SpacedropArgs = { peer_id: string, file_path: string }
|
||||
|
||||
export type Statistics = { id: number, date_captured: string, total_object_count: number, library_db_size: string, total_bytes_used: string, total_bytes_capacity: string, total_unique_bytes: string, total_bytes_free: string, preview_media_bytes: string }
|
||||
export type LocationUpdateArgs = { id: number; name: string | null; generate_preview_media: boolean | null; sync_preview_media: boolean | null; hidden: boolean | null; indexer_rules_ids: number[] }
|
||||
|
||||
/**
|
||||
* This is a stored key, and can be freely written to the database.
|
||||
*
|
||||
* It contains no sensitive information that is not encrypted.
|
||||
* NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
|
||||
*/
|
||||
export type StoredKey = { uuid: string, version: StoredKeyVersion, key_type: StoredKeyType, algorithm: Algorithm, hashing_algorithm: HashingAlgorithm, content_salt: Salt, master_key: EncryptedKey, master_key_nonce: Nonce, key_nonce: Nonce, key: number[], salt: Salt, memory_only: boolean, automount: boolean }
|
||||
export type NodeConfig = { id: string; name: string; p2p_port: number | null; p2p_email: string | null; p2p_img_url: string | null }
|
||||
|
||||
/**
|
||||
* This denotes the type of key. `Root` keys can be used to unlock the key manager, and `User` keys are ordinary keys.
|
||||
*/
|
||||
export type StoredKeyType = "User" | "Root"
|
||||
|
||||
/**
|
||||
* This denotes the `StoredKey` version.
|
||||
* This denotes the `StoredKey` version.
|
||||
*/
|
||||
export type StoredKeyVersion = "V1"
|
||||
|
||||
export type Tag = { id: number, pub_id: number[], name: string | null, color: string | null, total_objects: number | null, redundancy_goal: number | null, date_created: string, date_modified: string }
|
||||
/**
|
||||
* This should be used for passing an encrypted key around.
|
||||
*
|
||||
* This is always `ENCRYPTED_KEY_LEN` (which is `KEY_LEM` + `AEAD_TAG_LEN`)
|
||||
*/
|
||||
export type EncryptedKey = number[]
|
||||
|
||||
export type TagAssignArgs = { object_id: number, tag_id: number, unassign: boolean }
|
||||
export type PeerId = string
|
||||
|
||||
export type TagCreateArgs = { name: string, color: string }
|
||||
export type Object = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string }
|
||||
|
||||
export type TagUpdateArgs = { id: number, name: string | null, color: string | null }
|
||||
export type LibraryConfigWrapped = { uuid: string; config: LibraryConfig }
|
||||
|
||||
export type UnlockKeyManagerArgs = { password: string, secret_key: string }
|
||||
/**
|
||||
* These parameters define the password-hashing level.
|
||||
*
|
||||
* The greater the parameter, the longer the password will take to hash.
|
||||
*/
|
||||
export type Params = "Standard" | "Hardened" | "Paranoid"
|
||||
|
||||
export type Volume = { name: string, mount_point: string, total_capacity: string, available_capacity: string, is_removable: boolean, disk_type: string | null, file_system: string | null, is_root_filesystem: boolean }
|
||||
// The current explorer context together with the items to render in it.
export type ExplorerData = { context: ExplorerContext; items: ExplorerItem[] }

// NOTE(review): the snake_case, comma-delimited declarations in this section
// (`file_path_with_object`, `location_with_indexer_rules`, `object_with_file_paths`)
// duplicate the content of the PascalCase types declared later in this generated
// file; they look like output of an older generator version. Kept because they are
// exported names — confirm no importers before deleting.
export type file_path_with_object = { id: number, is_dir: boolean, cas_id: string | null, integrity_checksum: string | null, location_id: number, materialized_path: string, name: string, extension: string, size_in_bytes: string, inode: number[], device: number[], object_id: number | null, parent_id: number | null, key_id: number | null, date_created: string, date_modified: string, date_indexed: string, object: Object | null }

/**
 * Represents the operating system which the remote peer is running.
 * This is not used internally and predominantly is designed to be used for display purposes by the embedding application.
 */
export type OperatingSystem = "Windows" | "Linux" | "MacOS" | "Ios" | "Android" | { Other: string }

export type location_with_indexer_rules = { id: number, pub_id: number[], node_id: number, name: string, path: string, total_capacity: number | null, available_capacity: number | null, is_archived: boolean, generate_preview_media: boolean, sync_preview_media: boolean, hidden: boolean, date_created: string, indexer_rules: { indexer_rule: IndexerRule }[] }

// Media metadata row (dimensions, GPS, codec/stream info); fields are null when unknown.
export type MediaData = { id: number; pixel_width: number | null; pixel_height: number | null; longitude: number | null; latitude: number | null; fps: number | null; capture_device_make: string | null; capture_device_model: string | null; capture_device_software: string | null; duration_seconds: number | null; codecs: string | null; streams: number | null }

export type object_with_file_paths = { id: number, pub_id: number[], kind: number, key_id: number | null, hidden: boolean, favorite: boolean, important: boolean, has_thumbnail: boolean, has_thumbstrip: boolean, has_video_preview: boolean, ipfs_id: string | null, note: string | null, date_created: string, file_paths: FilePath[] }

// Location id + path argument for the file-identification job.
export type IdentifyUniqueFilesArgs = { id: number; path: string }
|
||||
|
||||
/**
 * This is a stored key, and can be freely written to the database.
 *
 * It contains no sensitive information that is not encrypted.
 */
export type StoredKey = { uuid: string; version: StoredKeyVersion; key_type: StoredKeyType; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; content_salt: Salt; master_key: EncryptedKey; master_key_nonce: Nonce; key_nonce: Nonce; key: number[]; salt: Salt; memory_only: boolean; automount: boolean }

// Initial key-manager configuration supplied during onboarding.
export type OnboardingConfig = { password: Protected<string>; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }
|
||||
|
||||
/**
 * This should be used for providing a nonce to encrypt/decrypt functions.
 *
 * You may also generate a nonce for a given algorithm with `Nonce::generate()`
 */
export type Nonce = { XChaCha20Poly1305: number[] } | { Aes256Gcm: number[] }

// A `FilePath` row joined with its associated `Object` (null when unidentified).
export type FilePathWithObject = { id: number; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; parent_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string; object: Object | null }

// Target peer and source file path for a Spacedrop transfer.
export type SpacedropArgs = { peer_id: PeerId; file_path: string }

// One sync operation: originating node, timestamp, id and typed payload.
export type CRDTOperation = { node: string; timestamp: number; id: string; typ: CRDTOperationType }

// An `Object` row joined with all of its `FilePath` rows.
export type ObjectWithFilePaths = { id: number; pub_id: number[]; kind: number; key_id: number | null; hidden: boolean; favorite: boolean; important: boolean; has_thumbnail: boolean; has_thumbstrip: boolean; has_video_preview: boolean; ipfs_id: string | null; note: string | null; date_created: string; file_paths: FilePath[] }
|
||||
|
||||
/**
 * This should be used for passing a salt around.
 *
 * You may also generate a salt with `Salt::generate()`
 */
export type Salt = number[]

// Fetch-by-id argument shared by several queries.
export type GetArgs = { id: number }

// Cut (move) a file path from a source location to a target location/path.
export type FileCutterJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string }

export type FilePath = { id: number; is_dir: boolean; cas_id: string | null; integrity_checksum: string | null; location_id: number; materialized_path: string; name: string; extension: string; size_in_bytes: string; inode: number[]; device: number[]; object_id: number | null; parent_id: number | null; key_id: number | null; date_created: string; date_modified: string; date_indexed: string }

// Lifecycle states of a background job.
export type JobStatus = "Queued" | "Running" | "Completed" | "Canceled" | "Failed" | "Paused"

// NOTE(review): `passes` is typed `string` although it reads like a count — this
// mirrors the Rust definition in a generated file; confirm upstream before changing.
export type FileEraserJobInit = { location_id: number; path_id: number; passes: string }

export type TagCreateArgs = { name: string; color: string }
|
||||
|
||||
/**
 * Can wrap a query argument to require it to contain a `library_id` and provide helpers for working with libraries.
 */
export type LibraryArgs<T> = { library_id: string; arg: T }

/**
 * TODO: P2P event for the frontend
 */
export type P2PEvent = { type: "DiscoveredPeer"; peer_id: PeerId; metadata: PeerMetadata } | { type: "SyncOperation"; library_id: string; operations: CRDTOperation[] }

export type SetFavoriteArgs = { id: number; favorite: boolean }

export type RenameFileArgs = { location_id: number; file_name: string; new_file_name: string }

// NOTE(review): an identical comma-delimited `Volume` may also be declared earlier
// in this file — two `export type Volume` declarations are a duplicate-identifier
// error; only one should survive regeneration. This one matches the current
// generator's semicolon style.
export type Volume = { name: string; mount_point: string; total_capacity: string; available_capacity: string; is_removable: boolean; disk_type: string | null; file_system: string | null; is_root_filesystem: boolean }

export type FileDeleterJobInit = { location_id: number; path_id: number }
|
||||
|
||||
/**
 * These are all possible algorithms that can be used for encryption and decryption
 */
export type Algorithm = "XChaCha20Poly1305" | "Aes256Gcm"

export type CreateLibraryArgs = { name: string }

// Discriminated union of the two item kinds the explorer can display.
export type ExplorerItem = { type: "Path"; has_thumbnail: boolean; item: FilePathWithObject } | { type: "Object"; has_thumbnail: boolean; item: ObjectWithFilePaths }

export type IndexerRule = { id: number; kind: number; name: string; default: boolean; parameters: number[]; date_created: string; date_modified: string }

// Snapshot of a job's state and progress as reported to the frontend.
export type JobReport = { id: string; name: string; action: string | null; data: number[] | null; metadata: any | null; is_background: boolean; created_at: string | null; started_at: string | null; completed_at: string | null; parent_id: string | null; status: JobStatus; task_count: number; completed_task_count: number; message: string }

export type OwnedOperationItem = { id: any; data: OwnedOperationData }

// Union of every sync-operation payload variant.
export type CRDTOperationType = SharedOperation | RelationOperation | OwnedOperation

export type Statistics = { id: number; date_captured: string; total_object_count: number; library_db_size: string; total_bytes_used: string; total_bytes_capacity: string; total_unique_bytes: string; total_bytes_free: string; preview_media_bytes: string }
|
||||
|
||||
export type GenerateThumbsForLocationArgs = { id: number; path: string }

// `unassign: true` removes the tag from the object instead of adding it.
export type TagAssignArgs = { object_id: number; tag_id: number; unassign: boolean }

export type OwnedOperation = { model: string; items: OwnedOperationItem[] }

export type SharedOperation = { record_id: any; model: string; data: SharedOperationData }

export type MasterPasswordChangeArgs = { password: Protected<string>; algorithm: Algorithm; hashing_algorithm: HashingAlgorithm }

export type RelationOperationData = "Create" | { Update: { field: string; value: any } } | "Delete"

// Cache-invalidation event for an rspc query; NOTE(review): presumably `result`
// carries refreshed data when available — confirm against the core emitter.
export type InvalidateOperationEvent = { key: string; arg: any; result: any | null }

export type FileEncryptorJobInit = { location_id: number; path_id: number; key_uuid: string; algorithm: Algorithm; metadata: boolean; preview_media: boolean; output_path: string | null }

// Serde-compact encoding from the Rust enum: `u` = unique create data, "a" = atomic.
// NOTE(review): variant meanings inferred from the Rust sync module — verify there.
export type SharedOperationCreateData = { u: { [key: string]: any } } | "a"

export type BuildInfo = { version: string; commit: string }

export type Location = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string }
|
||||
|
||||
/**
 * `LocationCreateArgs` is the argument received from the client using `rspc` to create a new location.
 * It has the actual path and a vector of indexer rules ids, to create many-to-many relationships
 * between the location and indexer rules.
 */
export type LocationCreateArgs = { path: string; indexer_rules_ids: number[] }

export type KeyAddArgs = { algorithm: Algorithm; hashing_algorithm: HashingAlgorithm; key: Protected<string>; library_sync: boolean; automount: boolean }

export type Node = { id: number; pub_id: number[]; name: string; platform: number; version: string | null; last_seen: string; timezone: string | null; date_created: string }

// Node configuration intersected with the resolved data directory path.
export type NodeState = ({ id: string; name: string; p2p_port: number | null; p2p_email: string | null; p2p_img_url: string | null }) & { data_path: string }

export type OwnedOperationData = { Create: { [key: string]: any } } | { CreateMany: { values: ([any, { [key: string]: any }])[]; skip_duplicates: boolean } } | { Update: { [key: string]: any } } | "Delete"

export type SharedOperationData = SharedOperationCreateData | { field: string; value: any } | null

// Paginated directory listing request (`cursor` for paging, `kind` to filter).
export type LocationExplorerArgs = { location_id: number; path: string | null; limit: number; cursor: string | null; kind: number[] | null }

export type FileCopierJobInit = { source_location_id: number; source_path_id: number; target_location_id: number; target_path: string; target_file_name_suffix: string | null }
|
||||
|
||||
export type LightScanArgs = { location_id: number; sub_path: string }

/**
 * This defines all available password hashing algorithms.
 */
export type HashingAlgorithm = { name: "Argon2id"; params: Params } | { name: "BalloonBlake3"; params: Params }

export type AutomountUpdateArgs = { uuid: string; status: boolean }

// What the explorer is currently browsing: a location or a tag.
export type ExplorerContext = ({ type: "Location" } & Location) | ({ type: "Tag" } & Tag)

// A `Location` row joined with its indexer rules (many-to-many via join records).
export type LocationWithIndexerRules = { id: number; pub_id: number[]; node_id: number; name: string; path: string; total_capacity: number | null; available_capacity: number | null; is_archived: boolean; generate_preview_media: boolean; sync_preview_media: boolean; hidden: boolean; date_created: string; indexer_rules: ({ indexer_rule: IndexerRule })[] }

/**
 * LibraryConfig holds the configuration for a specific library. This is stored as a '{uuid}.sdlibrary' file.
 */
export type LibraryConfig = { name: string; description: string }

export type Ordering = { name: boolean }

export type UnlockKeyManagerArgs = { password: Protected<string>; secret_key: Protected<string> }

export type FileDecryptorJobInit = { location_id: number; path_id: number; mount_associated_key: boolean; output_path: string | null; password: string | null; save_to_library: boolean | null }

// Transparent alias on the TypeScript side; NOTE(review): the wrapping presumably
// only has meaning (secret redaction) in the Rust source type — confirm in core.
export type Protected<T> = T
|
||||
|
||||
/**
 * `IndexerRuleCreateArgs` is the argument received from the client using rspc to create a new indexer rule.
 * Note that `parameters` field **MUST** be a JSON object serialized to bytes.
 *
 * In case of `RuleKind::AcceptFilesByGlob` or `RuleKind::RejectFilesByGlob`, it will be a
 * single string containing a glob pattern.
 *
 * In case of `RuleKind::AcceptIfChildrenDirectoriesArePresent` or `RuleKind::RejectIfChildrenDirectoriesArePresent` the
 * `parameters` field must be a vector of strings containing the names of the directories.
 */
export type IndexerRuleCreateArgs = { kind: RuleKind; name: string; parameters: string[] }

export type SetNoteArgs = { id: number; note: string | null }

// Null fields mean "leave unchanged".
export type TagUpdateArgs = { id: number; name: string | null; color: string | null }

export type ObjectValidatorArgs = { id: number; path: string }

export type RestoreBackupArgs = { password: Protected<string>; secret_key: Protected<string>; path: string }

export type Tag = { id: number; pub_id: number[]; name: string | null; color: string | null; total_objects: number | null; redundancy_goal: number | null; date_created: string; date_modified: string }

export type RelationOperation = { relation_item: string; relation_group: string; relation: string; data: RelationOperationData }

/**
 * This denotes the type of key. `Root` keys can be used to unlock the key manager, and `User` keys are ordinary keys.
 */
export type StoredKeyType = "User" | "Root"
|
||||
|
|
Loading…
Reference in a new issue