- begin job runner

- changed job schema
- bridge useMutation
Jamie Pine 2022-03-16 03:50:54 -07:00
parent 031818e2d8
commit dabe83f7fb
25 changed files with 357 additions and 212 deletions


@ -41,6 +1,8 @@ For independent creatives, hoarders and those that want to own their digital footprint
# Features
> NOTE: Spacedrive is under active development, most of the listed features are still experimental and subject to change.
<!-- ### Rich context -->
**Complete:** *(in testing)*
- **[File discovery](#)** - Scan devices, drives and cloud accounts to build a virtual, realtime-updating "yellow pages" directory of all files with metadata.
- **[Preview generation](#)** - Auto-generate lower-resolution stand-ins for images and videos.


@ -1,7 +1,7 @@
hard_tabs = true
tab_spaces = 4
match_block_trailing_comma = true
max_width = 150
max_width = 120
newline_style = "Unix"
use_field_init_shorthand = true
use_try_shorthand = true


@ -1,2 +1,3 @@
import type { EncryptionAlgorithm } from "./EncryptionAlgorithm";
export type ClientCommand = { key: "FileRead", params: { id: bigint, } } | { key: "FileDelete", params: { id: bigint, } } | { key: "LibDelete", params: { id: bigint, } } | { key: "TagCreate", params: { name: string, color: string, } } | { key: "TagAssign", params: { file_id: bigint, tag_id: bigint, } } | { key: "TagDelete", params: { id: bigint, } } | { key: "LocScan", params: { id: bigint, } } | { key: "LocDelete", params: { id: bigint, } } | { key: "LocUpdate", params: { id: bigint, name: string | null, } } | { key: "SysVolumeUnmount", params: { id: bigint, } };
export type ClientCommand = { key: "FileRead", params: { id: bigint, } } | { key: "FileEncrypt", params: { id: bigint, algorithm: EncryptionAlgorithm, } } | { key: "FileDelete", params: { id: bigint, } } | { key: "LibDelete", params: { id: bigint, } } | { key: "TagCreate", params: { name: string, color: string, } } | { key: "TagUpdate", params: { name: string, color: string, } } | { key: "TagAssign", params: { file_id: bigint, tag_id: bigint, } } | { key: "TagDelete", params: { id: bigint, } } | { key: "LocCreate", params: { id: bigint, } } | { key: "LocUpdate", params: { id: bigint, name: string | null, } } | { key: "LocDelete", params: { id: bigint, } } | { key: "SysVolumeUnmount", params: { id: bigint, } };


@ -1,2 +1,2 @@
export type ClientQuery = { key: "ClientGetState" } | { key: "SysGetVolumes" } | { key: "LibGetTags" } | { key: "SysGetLocation", params: { id: bigint, } } | { key: "LibGetExplorerDir", params: { path: string, limit: bigint, } };
export type ClientQuery = { key: "ClientGetState" } | { key: "SysGetVolumes" } | { key: "LibGetTags" } | { key: "JobGetRunning" } | { key: "JobGetHistory" } | { key: "SysGetLocation", params: { id: bigint, } } | { key: "LibGetExplorerDir", params: { path: string, limit: bigint, } };


@ -1,3 +1,3 @@
import type { LibraryState } from "./LibraryState";
export interface ClientState { client_id: string, client_name: string, data_path: string, tcp_port: number, libraries: Array<LibraryState>, current_library_id: string, }
export interface ClientState { client_uuid: string, client_name: string, data_path: string, tcp_port: number, libraries: Array<LibraryState>, current_library_uuid: string, }


@ -1,6 +1,7 @@
import type { ClientState } from "./ClientState";
import type { Directory } from "./Directory";
import type { JobResource } from "./JobResource";
import type { LocationResource } from "./LocationResource";
import type { Volume } from "./Volume";
export type CoreResponse = { key: "Success" } | { key: "SysGetVolumes", data: Array<Volume> } | { key: "SysGetLocations", data: LocationResource } | { key: "LibGetExplorerDir", data: Directory } | { key: "ClientGetState", data: ClientState };
export type CoreResponse = { key: "Success", data: null } | { key: "SysGetVolumes", data: Array<Volume> } | { key: "SysGetLocation", data: LocationResource } | { key: "LibGetExplorerDir", data: Directory } | { key: "ClientGetState", data: ClientState } | { key: "LocCreate", data: LocationResource } | { key: "JobGetRunning", data: Array<JobResource> } | { key: "JobGetHistory", data: Array<JobResource> };


@ -0,0 +1,2 @@
export type EncryptionAlgorithm = "None" | "AES128" | "AES192" | "AES256";


@ -0,0 +1,2 @@
export type JobAction = "ScanLoc" | "GeneratePreviewMedia";


@ -0,0 +1,4 @@
import type { JobAction } from "./JobAction";
import type { JobStatus } from "./JobStatus";
export interface JobResource { id: bigint, client_id: bigint, action: JobAction, status: JobStatus, percentage_complete: bigint, task_count: bigint, message: string, completed_task_count: bigint, date_created: string, date_modified: string, }


@ -0,0 +1,2 @@
export type JobStatus = "Queued" | "Running" | "Completed" | "Canceled";


@ -1,2 +1,2 @@
export interface LibraryState { library_id: string, library_path: string, offline: boolean, }
export interface LibraryState { library_uuid: string, library_path: string, offline: boolean, }


@ -0,0 +1,24 @@
/*
Warnings:
- You are about to drop the column `percentage_complete` on the `jobs` table. All the data in the column will be lost.
*/
-- RedefineTables
PRAGMA foreign_keys=OFF;
CREATE TABLE "new_jobs" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"client_id" INTEGER NOT NULL,
"action" INTEGER NOT NULL,
"status" INTEGER NOT NULL DEFAULT 0,
"task_count" INTEGER NOT NULL DEFAULT 1,
"completed_task_count" INTEGER NOT NULL DEFAULT 0,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "jobs_client_id_fkey" FOREIGN KEY ("client_id") REFERENCES "clients" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION
);
INSERT INTO "new_jobs" ("action", "client_id", "completed_task_count", "date_created", "date_modified", "id", "status", "task_count") SELECT "action", "client_id", "completed_task_count", "date_created", "date_modified", "id", "status", "task_count" FROM "jobs";
DROP TABLE "jobs";
ALTER TABLE "new_jobs" RENAME TO "jobs";
PRAGMA foreign_key_check;
PRAGMA foreign_keys=ON;


@ -135,11 +135,11 @@ model TagOnFile {
}
model Job {
id Int @id @default(autoincrement())
client_id Int
action Int
status Int @default(0)
percentage_complete Int @default(0)
id Int @id @default(autoincrement())
client_id Int
action Int
status Int @default(0)
task_count Int @default(1)
completed_task_count Int @default(0)
date_created DateTime @default(now())
@ -155,8 +155,8 @@ model Space {
encryption Int? @default(0)
date_created DateTime @default(now())
date_modified DateTime @default(now())
Library Library? @relation(fields: [libraryId], references: [id])
libraryId Int?
Library Library? @relation(fields: [libraryId], references: [id])
libraryId Int?
@@map("spaces")
}


@ -29,12 +29,17 @@ pub async fn create() -> Result<()> {
_ => Platform::Unknown,
};
let client = match db.client().find_unique(Client::uuid().equals(config.client_id.clone())).exec().await {
let client = match db
.client()
.find_unique(Client::uuid().equals(config.client_uuid.clone()))
.exec()
.await
{
Some(client) => client,
None => {
db.client()
.create_one(
Client::uuid().set(config.client_id.clone()),
Client::uuid().set(config.client_uuid.clone()),
Client::name().set(hostname.clone()),
vec![Client::platform().set(platform as i64), Client::online().set(true)],
)


@ -22,7 +22,7 @@ pub async fn get() -> Result<&'static PrismaClient, DatabaseError> {
let current_library = config
.libraries
.iter()
.find(|l| l.library_id == config.current_library_id)
.find(|l| l.library_uuid == config.current_library_uuid)
.ok_or(DatabaseError::MalformedConfig)?;
let path = current_library.library_path.clone();


@ -24,7 +24,11 @@ pub async fn open_dir(path: &str) -> Result<Directory, FileError> {
.await
.ok_or(FileError::FileNotFound(path.to_string()))?;
let files = db.file().find_many(vec![File::parent_id().equals(directory.id)]).exec().await;
let files = db
.file()
.find_many(vec![File::parent_id().equals(directory.id)])
.exec()
.await;
Ok(Directory {
directory: directory.into(),


@ -6,29 +6,29 @@ use serde::{Deserialize, Serialize};
use walkdir::{DirEntry, WalkDir};
use super::watcher::watch_dir;
use crate::db;
use crate::job::{JobAction, JobResource};
use crate::sys::locations::{create_location, get_location, LocationResource};
use crate::util::time;
use crate::{db, Core};
pub async fn scan_loc(location_id: i64) -> Result<()> {
pub async fn scan_loc(core: &mut Core, location_id: i64) -> Result<()> {
// get location by location_id from db and include location_paths
let location = get_location(location_id).await?;
if let Some(path) = &location.path {
scan_path(path).await?;
scan_path(core, path).await?;
watch_dir(path);
}
Ok(())
}
// creates a vector of valid path buffers from a directory
pub async fn scan_path(path: &str) -> Result<()> {
pub async fn scan_path(core: &mut Core, path: &str) -> Result<()> {
println!("Scanning directory: {}", &path);
// let current_library = library::loader::get().await?;
let db = db::get().await.unwrap();
let job = core.queue(JobResource::new(core.state.client_uuid.clone(), JobAction::ScanLoc, 1).await?);
let location = create_location(&path).await?;
// query db for the highest id, so we can increment it for the new files indexed
@ -36,13 +36,14 @@ pub async fn scan_path(path: &str) -> Result<()> {
struct QueryRes {
id: i64,
}
// grab the next id so we can increment in memory for batch inserting
let mut next_file_id = match db._query_raw::<QueryRes>(r#"SELECT MAX(id) id FROM files"#).await {
Ok(rows) => rows[0].id,
Err(e) => Err(anyhow!("Error querying for next file id: {}", e))?,
};
let mut get_id = || {
next_file_id += 1; // increment id
next_file_id += 1;
next_file_id
};
@ -91,7 +92,11 @@ pub async fn scan_path(path: &str) -> Result<()> {
let scan_read_time = scan_start.elapsed();
for (i, chunk) in paths.chunks(100).enumerate() {
println!("Writing {} files to db at chunk {}", chunk.len(), i);
job.set_progress(
Some(12),
Some(format!("Writing {} files to db at chunk {}", chunk.len(), i)),
);
// vector to store active models
let mut files: Vec<String> = Vec::new();
for (file_path, file_id, parent_dir_id) in chunk {
@ -184,7 +189,11 @@ fn is_library(entry: &DirEntry) -> bool {
}
fn is_node_modules(entry: &DirEntry) -> bool {
entry.file_name().to_str().map(|s| s.contains("node_modules")).unwrap_or(false)
entry
.file_name()
.to_str()
.map(|s| s.contains("node_modules"))
.unwrap_or(false)
}
fn is_app_bundle(entry: &DirEntry) -> bool {


@ -1,11 +1,12 @@
use crate::{
db::get,
db::{self, get},
prisma::{Client, Job, JobData},
ClientQuery, Core, CoreEvent,
};
use anyhow::Result;
use int_enum::IntEnum;
use serde::{Deserialize, Serialize};
use thiserror::Error;
use ts_rs::TS;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
@ -17,6 +18,7 @@ pub struct JobResource {
pub status: JobStatus,
pub percentage_complete: i64,
pub task_count: i64,
pub message: String,
pub completed_task_count: i64,
#[ts(type = "string")]
pub date_created: chrono::DateTime<chrono::Utc>,
@ -50,21 +52,36 @@ impl Into<JobResource> for JobData {
client_id: self.client_id,
action: JobAction::from_int(self.action).unwrap(),
status: JobStatus::from_int(self.status).unwrap(),
percentage_complete: self.percentage_complete,
task_count: self.task_count,
completed_task_count: self.completed_task_count,
date_created: self.date_created,
date_modified: self.date_modified,
percentage_complete: 0,
message: "".to_string(),
}
}
}
pub struct JobRunner {
pub running_jobs: Vec<JobResource>,
pub queued_jobs: Vec<JobResource>,
}
impl JobRunner {
pub fn new() -> Self {
Self {
running_jobs: Vec::new(),
queued_jobs: Vec::new(),
}
}
}
impl JobResource {
pub async fn new(core: &Core, action: JobAction, task_count: i64) -> Result<Self> {
pub async fn new(client_uuid: String, action: JobAction, task_count: i64) -> Result<Self, JobError> {
let db = get().await?;
let client = db
.client()
.find_unique(Client::uuid().equals(core.state.client_id.clone()))
.find_unique(Client::uuid().equals(client_uuid))
.exec()
.await
.unwrap();
@ -79,6 +96,7 @@ impl JobResource {
completed_task_count: 0,
date_created: chrono::Utc::now(),
date_modified: chrono::Utc::now(),
message: "".to_string(),
};
db.job().create_one(
@ -90,33 +108,74 @@ impl JobResource {
Ok(job)
}
pub async fn update_task_count(&mut self, core: &Core, completed_task_count: i64) -> Result<Self> {
let db = get().await.unwrap();
db.job()
pub async fn save(&self, core: &Core) -> Result<(), JobError> {
let db = get().await?;
let job = db
.job()
.find_unique(Job::id().equals(self.id))
.update(vec![Job::completed_task_count().set(completed_task_count)])
.update(vec![
Job::status().set(self.status.int_value()),
Job::completed_task_count().set(self.completed_task_count),
Job::date_modified().set(chrono::Utc::now()),
])
.exec()
.await;
self.completed_task_count = completed_task_count;
core.send(CoreEvent::InvalidateQuery(ClientQuery::JobGetRunning)).await;
Ok(self.clone())
Ok(())
}
pub fn set_progress(&mut self, completed_task_count: Option<i64>, message: Option<String>) -> &Self {
if let Some(count) = completed_task_count {
self.completed_task_count = count;
self.percentage_complete = (count as f64 / self.task_count as f64 * 100.0) as i64;
}
if let Some(msg) = message {
self.message = msg;
}
self
}
pub fn set_status(&mut self, status: JobStatus, task_count: Option<i64>) -> &Self {
self.status = status;
if let Some(count) = task_count {
self.task_count = count;
}
self.set_progress(None, Some("Starting job...".to_string()));
self
}
pub async fn update_status(&mut self, core: &Core, status: JobStatus) -> Result<Self> {
let db = get().await.unwrap();
db.job()
.find_unique(Job::id().equals(self.id))
.update(vec![Job::status().set(status.int_value())])
pub async fn get_running() -> Result<Vec<JobResource>, JobError> {
let db = get().await?;
let jobs = db
.job()
.find_many(vec![Job::status().equals(JobStatus::Running.int_value())])
.exec()
.await;
self.status = status;
Ok(jobs.into_iter().map(|j| j.into()).collect())
}
core.send(CoreEvent::InvalidateQuery(ClientQuery::JobGetRunning)).await;
pub async fn get_history() -> Result<Vec<JobResource>, JobError> {
let db = get().await?;
let jobs = db
.job()
.find_many(vec![
Job::status().equals(JobStatus::Completed.int_value()),
Job::status().equals(JobStatus::Canceled.int_value()),
Job::status().equals(JobStatus::Queued.int_value()),
])
.exec()
.await;
Ok(self.clone())
Ok(jobs.into_iter().map(|j| j.into()).collect())
}
}
#[derive(Error, Debug)]
pub enum JobError {
#[error("Failed to create job (job_id {job_id:?})")]
CreateFailure { job_id: String },
#[error("Database error")]
DatabaseError(#[from] db::DatabaseError),
}


@ -1,5 +1,6 @@
use anyhow::Result;
use crypto::encryption::EncryptionAlgorithm;
use job::JobResource;
use log::{error, info};
use serde::{Deserialize, Serialize};
use state::client::ClientState;
@ -25,6 +26,7 @@ pub mod util;
pub struct Core {
pub event_sender: mpsc::Sender<CoreEvent>,
pub state: ClientState,
pub job_runner: job::JobRunner,
}
impl Core {
@ -39,12 +41,20 @@ impl Core {
// prepare basic client state
let mut state = ClientState::new(data_dir, "diamond-mastering-space-dragon").unwrap();
// load from disk
state.read_disk().unwrap_or(error!("No client state found, creating new one..."));
state
.read_disk()
.unwrap_or(error!("No client state found, creating new one..."));
state.save();
let core = Core { event_sender, state };
let core = Core {
event_sender,
state,
job_runner: job::JobRunner::new(),
};
core.initializer().await;
(core, event_receiver)
// activate p2p listeners
// p2p::listener::listen(None);
@ -59,7 +69,7 @@ impl Core {
} else {
for library in self.state.libraries.iter() {
// init database for library
match library::loader::load(&library.library_path, &library.library_id).await {
match library::loader::load(&library.library_path, &library.library_uuid).await {
Ok(library) => info!("Loaded library: {:?}", library),
Err(e) => info!("Error loading library: {:?}", e),
}
@ -71,6 +81,10 @@ impl Core {
Err(e) => info!("Error initializing client: {:?}", e),
};
}
pub fn queue(&mut self, job: JobResource) -> &mut JobResource {
self.job_runner.queued_jobs.push(job);
self.job_runner.queued_jobs.last_mut().unwrap()
}
pub async fn command(&self, cmd: ClientCommand) -> Result<CoreResponse, CoreError> {
info!("Core command: {:?}", cmd);
Ok(match cmd {
@ -103,10 +117,12 @@ impl Core {
// get location from library
ClientQuery::SysGetLocation { id } => CoreResponse::SysGetLocation(sys::locations::get_location(id).await?),
// return contents of a directory for the explorer
ClientQuery::LibGetExplorerDir { path, limit: _ } => CoreResponse::LibGetExplorerDir(file::explorer::open_dir(&path).await?),
ClientQuery::LibGetExplorerDir { path, limit: _ } => {
CoreResponse::LibGetExplorerDir(file::explorer::open_dir(&path).await?)
},
ClientQuery::LibGetTags => todo!(),
ClientQuery::JobGetRunning => todo!(),
ClientQuery::JobGetHistory => todo!(),
ClientQuery::JobGetRunning => CoreResponse::JobGetRunning(job::JobResource::get_running().await?),
ClientQuery::JobGetHistory => CoreResponse::JobGetHistory(job::JobResource::get_history().await?),
})
}
// send an event to the client
@ -186,6 +202,8 @@ pub enum CoreError {
SysError(#[from] sys::SysError),
#[error("File error")]
FileError(#[from] file::FileError),
#[error("Job error")]
JobError(#[from] job::JobError),
#[error("Database error")]
DatabaseError(#[from] db::DatabaseError),
}


@ -22,7 +22,7 @@ pub async fn get() -> Result<LibraryData> {
// get library from db
let library = match db
.library()
.find_unique(Library::uuid().equals(library_state.library_id.clone()))
.find_unique(Library::uuid().equals(library_state.library_uuid.clone()))
.exec()
.await
{
@ -43,8 +43,8 @@ pub async fn load(library_path: &str, library_id: &str) -> Result<()> {
println!("Initializing library: {} {}", &library_id, library_path);
if config.current_library_id != library_id {
config.current_library_id = library_id.to_string();
if config.current_library_uuid != library_id {
config.current_library_uuid = library_id.to_string();
config.save();
}
// create connection with library database & run migrations
@ -61,7 +61,7 @@ pub async fn create(name: Option<String>) -> Result<()> {
println!("Creating library {:?}, UUID: {:?}", name, uuid);
let library_state = LibraryState {
library_id: uuid.clone(),
library_uuid: uuid.clone(),
library_path: format!("{}/{}", config.data_path, LIBRARY_DB_NAME),
..LibraryState::default()
};
@ -70,7 +70,7 @@ pub async fn create(name: Option<String>) -> Result<()> {
config.libraries.push(library_state);
config.current_library_id = uuid;
config.current_library_uuid = uuid;
config.save();
@ -79,7 +79,7 @@ pub async fn create(name: Option<String>) -> Result<()> {
let _library = db
.library()
.create_one(
Library::uuid().set(config.current_library_id),
Library::uuid().set(config.current_library_uuid),
Library::name().set(name.unwrap_or(DEFAULT_NAME.into())),
vec![],
)


@ -13,9 +13,12 @@ pub struct PrismaClient {
query_schema: Arc<QuerySchema>,
}
pub async fn new_client() -> PrismaClient {
let datamodel_str = "datasource db {\n provider = \"sqlite\"\n url = \"file:dev.db\"\n}\n\ngenerator client {\n provider = \"prisma-client-rust\"\n output = \"../src/prisma.rs\"\n}\n\n// generator js {\n// provider = \"prisma-client-js\"\n// output = \"../types\"\n// }\n\nmodel Migration {\n id Int @id @default(autoincrement())\n name String\n checksum String @unique\n steps_applied Int @default(0)\n applied_at DateTime @default(now())\n\n @@map(\"_migrations\")\n}\n\nmodel Library {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n remote_id String?\n is_primary Boolean @default(true)\n encryption Int @default(0)\n date_created DateTime @default(now())\n timezone String?\n spaces Space[]\n\n @@map(\"libraries\")\n}\n\nmodel LibraryStatistics {\n id Int @id @default(autoincrement())\n date_captured DateTime @default(now())\n library_id Int @unique\n total_file_count Int @default(0)\n total_bytes_used String @default(\"0\")\n total_byte_capacity String @default(\"0\")\n total_unique_bytes String @default(\"0\")\n\n @@map(\"library_statistics\")\n}\n\nmodel Client {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n platform Int @default(0)\n version String?\n online Boolean? @default(true)\n last_seen DateTime @default(now())\n timezone String?\n date_created DateTime @default(now())\n jobs Job[]\n\n @@map(\"clients\")\n}\n\nmodel Location {\n id Int @id @default(autoincrement())\n name String?\n path String?\n total_capacity Int?\n available_capacity Int?\n is_removable Boolean @default(true)\n is_ejectable Boolean @default(true)\n is_root_filesystem Boolean @default(true)\n is_online Boolean @default(true)\n date_created DateTime @default(now())\n files File[]\n\n @@map(\"locations\")\n}\n\nmodel File {\n id Int @id @default(autoincrement())\n is_dir Boolean @default(false)\n location_id Int\n stem String\n name String\n extension String?\n quick_checksum String? // 100 * 100 byte samples\n full_checksum String? // full byte to byte hash\n size_in_bytes String\n encryption Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n date_indexed DateTime @default(now())\n ipfs_id String?\n\n location Location? @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n parent File? @relation(\"directory_files\", fields: [parent_id], references: [id])\n parent_id Int?\n children File[] @relation(\"directory_files\")\n\n file_tags TagOnFile[]\n @@unique([location_id, stem, name, extension])\n @@map(\"files\")\n}\n\nmodel Tag {\n id Int @id @default(autoincrement())\n name String?\n encryption Int? @default(0)\n total_files Int? @default(0)\n redundancy_goal Int? 
@default(1)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n tag_files TagOnFile[]\n\n @@map(\"tags\")\n}\n\nmodel TagOnFile {\n date_created DateTime @default(now())\n\n tag_id Int\n tag Tag @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n file_id Int\n file File @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@id([tag_id, file_id])\n @@map(\"tags_on_files\")\n}\n\nmodel Job {\n id Int @id @default(autoincrement())\n client_id Int\n action Int\n status Int @default(0)\n percentage_complete Int @default(0)\n task_count Int @default(1)\n completed_task_count Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n clients Client @relation(fields: [client_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@map(\"jobs\")\n}\n\nmodel Space {\n id Int @id @default(autoincrement())\n name String\n encryption Int? @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n Library Library? @relation(fields: [libraryId], references: [id])\n libraryId Int?\n @@map(\"spaces\")\n}\n" ;
let datamodel_str = "datasource db {\n provider = \"sqlite\"\n url = \"file:dev.db\"\n}\n\ngenerator client {\n provider = \"prisma-client-rust\"\n output = \"../src/prisma.rs\"\n}\n\n// generator js {\n// provider = \"prisma-client-js\"\n// output = \"../types\"\n// }\n\nmodel Migration {\n id Int @id @default(autoincrement())\n name String\n checksum String @unique\n steps_applied Int @default(0)\n applied_at DateTime @default(now())\n\n @@map(\"_migrations\")\n}\n\nmodel Library {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n remote_id String?\n is_primary Boolean @default(true)\n encryption Int @default(0)\n date_created DateTime @default(now())\n timezone String?\n spaces Space[]\n\n @@map(\"libraries\")\n}\n\nmodel LibraryStatistics {\n id Int @id @default(autoincrement())\n date_captured DateTime @default(now())\n library_id Int @unique\n total_file_count Int @default(0)\n total_bytes_used String @default(\"0\")\n total_byte_capacity String @default(\"0\")\n total_unique_bytes String @default(\"0\")\n\n @@map(\"library_statistics\")\n}\n\nmodel Client {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n platform Int @default(0)\n version String?\n online Boolean? @default(true)\n last_seen DateTime @default(now())\n timezone String?\n date_created DateTime @default(now())\n jobs Job[]\n\n @@map(\"clients\")\n}\n\nmodel Location {\n id Int @id @default(autoincrement())\n name String?\n path String?\n total_capacity Int?\n available_capacity Int?\n is_removable Boolean @default(true)\n is_ejectable Boolean @default(true)\n is_root_filesystem Boolean @default(true)\n is_online Boolean @default(true)\n date_created DateTime @default(now())\n files File[]\n\n @@map(\"locations\")\n}\n\nmodel File {\n id Int @id @default(autoincrement())\n is_dir Boolean @default(false)\n location_id Int\n stem String\n name String\n extension String?\n quick_checksum String? // 100 * 100 byte samples\n full_checksum String? // full byte to byte hash\n size_in_bytes String\n encryption Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n date_indexed DateTime @default(now())\n ipfs_id String?\n\n location Location? @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n parent File? @relation(\"directory_files\", fields: [parent_id], references: [id])\n parent_id Int?\n children File[] @relation(\"directory_files\")\n\n file_tags TagOnFile[]\n @@unique([location_id, stem, name, extension])\n @@map(\"files\")\n}\n\nmodel Tag {\n id Int @id @default(autoincrement())\n name String?\n encryption Int? @default(0)\n total_files Int? @default(0)\n redundancy_goal Int? 
@default(1)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n tag_files TagOnFile[]\n\n @@map(\"tags\")\n}\n\nmodel TagOnFile {\n date_created DateTime @default(now())\n\n tag_id Int\n tag Tag @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n file_id Int\n file File @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@id([tag_id, file_id])\n @@map(\"tags_on_files\")\n}\n\nmodel Job {\n id Int @id @default(autoincrement())\n client_id Int\n action Int\n status Int @default(0)\n task_count Int @default(1)\n completed_task_count Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n clients Client @relation(fields: [client_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@map(\"jobs\")\n}\n\nmodel Space {\n id Int @id @default(autoincrement())\n name String\n encryption Int? @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n Library Library? @relation(fields: [libraryId], references: [id])\n libraryId Int?\n @@map(\"spaces\")\n}\n" ;
let config = parse_configuration(datamodel_str).unwrap().subject;
let source = config.datasources.first().expect("Pleasy supply a datasource in your schema.prisma file");
let source = config
.datasources
.first()
.expect("Pleasy supply a datasource in your schema.prisma file");
let url = if let Some(url) = source.load_shadow_database_url().unwrap() {
url
} else {
@ -36,9 +39,12 @@ pub async fn new_client() -> PrismaClient {
new_client_with_url(&url).await
}
pub async fn new_client_with_url(url: &str) -> PrismaClient {
let datamodel_str = "datasource db {\n provider = \"sqlite\"\n url = \"file:dev.db\"\n}\n\ngenerator client {\n provider = \"prisma-client-rust\"\n output = \"../src/prisma.rs\"\n}\n\n// generator js {\n// provider = \"prisma-client-js\"\n// output = \"../types\"\n// }\n\nmodel Migration {\n id Int @id @default(autoincrement())\n name String\n checksum String @unique\n steps_applied Int @default(0)\n applied_at DateTime @default(now())\n\n @@map(\"_migrations\")\n}\n\nmodel Library {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n remote_id String?\n is_primary Boolean @default(true)\n encryption Int @default(0)\n date_created DateTime @default(now())\n timezone String?\n spaces Space[]\n\n @@map(\"libraries\")\n}\n\nmodel LibraryStatistics {\n id Int @id @default(autoincrement())\n date_captured DateTime @default(now())\n library_id Int @unique\n total_file_count Int @default(0)\n total_bytes_used String @default(\"0\")\n total_byte_capacity String @default(\"0\")\n total_unique_bytes String @default(\"0\")\n\n @@map(\"library_statistics\")\n}\n\nmodel Client {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n platform Int @default(0)\n version String?\n online Boolean? @default(true)\n last_seen DateTime @default(now())\n timezone String?\n date_created DateTime @default(now())\n jobs Job[]\n\n @@map(\"clients\")\n}\n\nmodel Location {\n id Int @id @default(autoincrement())\n name String?\n path String?\n total_capacity Int?\n available_capacity Int?\n is_removable Boolean @default(true)\n is_ejectable Boolean @default(true)\n is_root_filesystem Boolean @default(true)\n is_online Boolean @default(true)\n date_created DateTime @default(now())\n files File[]\n\n @@map(\"locations\")\n}\n\nmodel File {\n id Int @id @default(autoincrement())\n is_dir Boolean @default(false)\n location_id Int\n stem String\n name String\n extension String?\n quick_checksum String? // 100 * 100 byte samples\n full_checksum String? // full byte to byte hash\n size_in_bytes String\n encryption Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n date_indexed DateTime @default(now())\n ipfs_id String?\n\n location Location? @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n parent File? @relation(\"directory_files\", fields: [parent_id], references: [id])\n parent_id Int?\n children File[] @relation(\"directory_files\")\n\n file_tags TagOnFile[]\n @@unique([location_id, stem, name, extension])\n @@map(\"files\")\n}\n\nmodel Tag {\n id Int @id @default(autoincrement())\n name String?\n encryption Int? @default(0)\n total_files Int? @default(0)\n redundancy_goal Int? 
@default(1)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n tag_files TagOnFile[]\n\n @@map(\"tags\")\n}\n\nmodel TagOnFile {\n date_created DateTime @default(now())\n\n tag_id Int\n tag Tag @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n file_id Int\n file File @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@id([tag_id, file_id])\n @@map(\"tags_on_files\")\n}\n\nmodel Job {\n id Int @id @default(autoincrement())\n client_id Int\n action Int\n status Int @default(0)\n percentage_complete Int @default(0)\n task_count Int @default(1)\n completed_task_count Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n clients Client @relation(fields: [client_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@map(\"jobs\")\n}\n\nmodel Space {\n id Int @id @default(autoincrement())\n name String\n encryption Int? @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n Library Library? @relation(fields: [libraryId], references: [id])\n libraryId Int?\n @@map(\"spaces\")\n}\n" ;
let datamodel_str = "datasource db {\n provider = \"sqlite\"\n url = \"file:dev.db\"\n}\n\ngenerator client {\n provider = \"prisma-client-rust\"\n output = \"../src/prisma.rs\"\n}\n\n// generator js {\n// provider = \"prisma-client-js\"\n// output = \"../types\"\n// }\n\nmodel Migration {\n id Int @id @default(autoincrement())\n name String\n checksum String @unique\n steps_applied Int @default(0)\n applied_at DateTime @default(now())\n\n @@map(\"_migrations\")\n}\n\nmodel Library {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n remote_id String?\n is_primary Boolean @default(true)\n encryption Int @default(0)\n date_created DateTime @default(now())\n timezone String?\n spaces Space[]\n\n @@map(\"libraries\")\n}\n\nmodel LibraryStatistics {\n id Int @id @default(autoincrement())\n date_captured DateTime @default(now())\n library_id Int @unique\n total_file_count Int @default(0)\n total_bytes_used String @default(\"0\")\n total_byte_capacity String @default(\"0\")\n total_unique_bytes String @default(\"0\")\n\n @@map(\"library_statistics\")\n}\n\nmodel Client {\n id Int @id @default(autoincrement())\n uuid String @unique\n name String\n platform Int @default(0)\n version String?\n online Boolean? @default(true)\n last_seen DateTime @default(now())\n timezone String?\n date_created DateTime @default(now())\n jobs Job[]\n\n @@map(\"clients\")\n}\n\nmodel Location {\n id Int @id @default(autoincrement())\n name String?\n path String?\n total_capacity Int?\n available_capacity Int?\n is_removable Boolean @default(true)\n is_ejectable Boolean @default(true)\n is_root_filesystem Boolean @default(true)\n is_online Boolean @default(true)\n date_created DateTime @default(now())\n files File[]\n\n @@map(\"locations\")\n}\n\nmodel File {\n id Int @id @default(autoincrement())\n is_dir Boolean @default(false)\n location_id Int\n stem String\n name String\n extension String?\n quick_checksum String? // 100 * 100 byte samples\n full_checksum String? // full byte to byte hash\n size_in_bytes String\n encryption Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n date_indexed DateTime @default(now())\n ipfs_id String?\n\n location Location? @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n parent File? @relation(\"directory_files\", fields: [parent_id], references: [id])\n parent_id Int?\n children File[] @relation(\"directory_files\")\n\n file_tags TagOnFile[]\n @@unique([location_id, stem, name, extension])\n @@map(\"files\")\n}\n\nmodel Tag {\n id Int @id @default(autoincrement())\n name String?\n encryption Int? @default(0)\n total_files Int? @default(0)\n redundancy_goal Int? 
@default(1)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n tag_files TagOnFile[]\n\n @@map(\"tags\")\n}\n\nmodel TagOnFile {\n date_created DateTime @default(now())\n\n tag_id Int\n tag Tag @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n file_id Int\n file File @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@id([tag_id, file_id])\n @@map(\"tags_on_files\")\n}\n\nmodel Job {\n id Int @id @default(autoincrement())\n client_id Int\n action Int\n status Int @default(0)\n task_count Int @default(1)\n completed_task_count Int @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n clients Client @relation(fields: [client_id], references: [id], onDelete: NoAction, onUpdate: NoAction)\n\n @@map(\"jobs\")\n}\n\nmodel Space {\n id Int @id @default(autoincrement())\n name String\n encryption Int? @default(0)\n date_created DateTime @default(now())\n date_modified DateTime @default(now())\n\n Library Library? @relation(fields: [libraryId], references: [id])\n libraryId Int?\n @@map(\"spaces\")\n}\n" ;
let config = parse_configuration(datamodel_str).unwrap().subject;
let source = config.datasources.first().expect("Pleasy supply a datasource in your schema.prisma file");
let source = config
.datasources
.first()
.expect("Pleasy supply a datasource in your schema.prisma file");
let (db_name, executor) = executor::load(&source, &[], &url).await.unwrap();
let internal_model = InternalDataModelBuilder::new(&datamodel_str).build(db_name);
let query_schema = Arc::new(schema_builder::build(
@ -852,7 +858,12 @@ impl<'a> MigrationActions<'a> {
};
MigrationFindMany { query }
}
pub fn create_one(&self, name: MigrationSetName, checksum: MigrationSetChecksum, params: Vec<MigrationSetParam>) -> MigrationCreateOne {
pub fn create_one(
&self,
name: MigrationSetName,
checksum: MigrationSetChecksum,
params: Vec<MigrationSetParam>,
) -> MigrationCreateOne {
let mut input_fields = params.into_iter().map(|p| p.field()).collect::<Vec<_>>();
input_fields.push(MigrationSetParam::from(name).field());
input_fields.push(MigrationSetParam::from(checksum).field());
@ -1630,7 +1641,9 @@ impl LibrarySetParam {
name: "spaces".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -1643,7 +1656,9 @@ impl LibrarySetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -1880,7 +1895,12 @@ impl<'a> LibraryActions<'a> {
};
LibraryFindMany { query }
}
pub fn create_one(&self, uuid: LibrarySetUuid, name: LibrarySetName, params: Vec<LibrarySetParam>) -> LibraryCreateOne {
pub fn create_one(
&self,
uuid: LibrarySetUuid,
name: LibrarySetName,
params: Vec<LibrarySetParam>,
) -> LibraryCreateOne {
let mut input_fields = params.into_iter().map(|p| p.field()).collect::<Vec<_>>();
input_fields.push(LibrarySetParam::from(uuid).field());
input_fields.push(LibrarySetParam::from(name).field());
@ -2787,7 +2807,11 @@ impl<'a> LibraryStatisticsActions<'a> {
};
LibraryStatisticsFindMany { query }
}
pub fn create_one(&self, library_id: LibraryStatisticsSetLibraryId, params: Vec<LibraryStatisticsSetParam>) -> LibraryStatisticsCreateOne {
pub fn create_one(
&self,
library_id: LibraryStatisticsSetLibraryId,
params: Vec<LibraryStatisticsSetParam>,
) -> LibraryStatisticsCreateOne {
let mut input_fields = params.into_iter().map(|p| p.field()).collect::<Vec<_>>();
input_fields.push(LibraryStatisticsSetParam::from(library_id).field());
let query = Query {
@ -3653,7 +3677,9 @@ impl ClientSetParam {
name: "jobs".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -3666,7 +3692,9 @@ impl ClientSetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -4729,7 +4757,9 @@ impl LocationSetParam {
name: "files".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -4742,7 +4772,9 @@ impl LocationSetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -6541,7 +6573,9 @@ impl FileSetParam {
name: "children".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -6554,7 +6588,9 @@ impl FileSetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -6563,7 +6599,9 @@ impl FileSetParam {
name: "file_tags".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -6576,7 +6614,9 @@ impl FileSetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -6813,7 +6853,13 @@ impl<'a> FileActions<'a> {
};
FileFindMany { query }
}
pub fn create_one(&self, stem: FileSetStem, name: FileSetName, size_in_bytes: FileSetSizeInBytes, params: Vec<FileSetParam>) -> FileCreateOne {
pub fn create_one(
&self,
stem: FileSetStem,
name: FileSetName,
size_in_bytes: FileSetSizeInBytes,
params: Vec<FileSetParam>,
) -> FileCreateOne {
let mut input_fields = params.into_iter().map(|p| p.field()).collect::<Vec<_>>();
input_fields.push(FileSetParam::from(stem).field());
input_fields.push(FileSetParam::from(name).field());
@ -7594,7 +7640,9 @@ impl TagSetParam {
name: "tag_files".into(),
fields: Some(vec![Field {
name: "connect".into(),
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
list: true,
wrap_list: true,
..Default::default()
@ -7607,7 +7655,9 @@ impl TagSetParam {
name: "disconnect".into(),
list: true,
wrap_list: true,
fields: Some(transform_equals(where_params.into_iter().map(|item| item.field()).collect())),
fields: Some(transform_equals(
where_params.into_iter().map(|item| item.field()).collect(),
)),
..Default::default()
}]),
..Default::default()
@ -7863,7 +7913,11 @@ impl<'a> TagActions<'a> {
}
}
fn tag_on_file_outputs() -> Vec<Output> {
vec![Output::new("date_created"), Output::new("tag_id"), Output::new("file_id")]
vec![
Output::new("date_created"),
Output::new("tag_id"),
Output::new("file_id"),
]
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TagOnFileData {
@ -8541,7 +8595,12 @@ impl<'a> TagOnFileActions<'a> {
};
TagOnFileFindMany { query }
}
pub fn create_one(&self, tag: TagOnFileLinkTag, file: TagOnFileLinkFile, params: Vec<TagOnFileSetParam>) -> TagOnFileCreateOne {
pub fn create_one(
&self,
tag: TagOnFileLinkTag,
file: TagOnFileLinkFile,
params: Vec<TagOnFileSetParam>,
) -> TagOnFileCreateOne {
let mut input_fields = params.into_iter().map(|p| p.field()).collect::<Vec<_>>();
input_fields.push(TagOnFileSetParam::from(tag).field());
input_fields.push(TagOnFileSetParam::from(file).field());
@ -8567,7 +8626,6 @@ fn job_outputs() -> Vec<Output> {
Output::new("client_id"),
Output::new("action"),
Output::new("status"),
Output::new("percentage_complete"),
Output::new("task_count"),
Output::new("completed_task_count"),
Output::new("date_created"),
@ -8584,8 +8642,6 @@ pub struct JobData {
pub action: i64,
#[serde(rename = "status")]
pub status: i64,
#[serde(rename = "percentage_complete")]
pub percentage_complete: i64,
#[serde(rename = "task_count")]
pub task_count: i64,
#[serde(rename = "completed_task_count")]
@ -8619,9 +8675,6 @@ impl Job {
pub fn status() -> JobStatusField {
JobStatusField {}
}
pub fn percentage_complete() -> JobPercentageCompleteField {
JobPercentageCompleteField {}
}
pub fn task_count() -> JobTaskCountField {
JobTaskCountField {}
}
@ -8746,33 +8799,6 @@ impl JobStatusField {
JobSetStatus(value).into()
}
}
pub struct JobPercentageCompleteField {}
pub struct JobSetPercentageComplete(i64);
impl From<JobSetPercentageComplete> for JobSetParam {
fn from(value: JobSetPercentageComplete) -> Self {
Self::PercentageComplete(value.0)
}
}
impl JobPercentageCompleteField {
pub fn lt(&self, value: i64) -> JobWhereParam {
JobWhereParam::PercentageCompleteLT(value)
}
pub fn gt(&self, value: i64) -> JobWhereParam {
JobWhereParam::PercentageCompleteGT(value)
}
pub fn lte(&self, value: i64) -> JobWhereParam {
JobWhereParam::PercentageCompleteLTE(value)
}
pub fn gte(&self, value: i64) -> JobWhereParam {
JobWhereParam::PercentageCompleteGTE(value)
}
pub fn equals(&self, value: i64) -> JobWhereParam {
JobWhereParam::PercentageCompleteEquals(value)
}
pub fn set<T: From<JobSetPercentageComplete>>(&self, value: i64) -> T {
JobSetPercentageComplete(value).into()
}
}
pub struct JobTaskCountField {}
pub struct JobSetTaskCount(i64);
impl From<JobSetTaskCount> for JobSetParam {
@ -8920,11 +8946,6 @@ pub enum JobWhereParam {
StatusLTE(i64),
StatusGTE(i64),
StatusEquals(i64),
PercentageCompleteLT(i64),
PercentageCompleteGT(i64),
PercentageCompleteLTE(i64),
PercentageCompleteGTE(i64),
PercentageCompleteEquals(i64),
TaskCountLT(i64),
TaskCountGT(i64),
TaskCountLTE(i64),
@ -9133,51 +9154,6 @@ impl JobWhereParam {
}]),
..Default::default()
},
Self::PercentageCompleteLT(value) => Field {
name: "percentage_complete".into(),
fields: Some(vec![Field {
name: "lt".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
}]),
..Default::default()
},
Self::PercentageCompleteGT(value) => Field {
name: "percentage_complete".into(),
fields: Some(vec![Field {
name: "gt".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
}]),
..Default::default()
},
Self::PercentageCompleteLTE(value) => Field {
name: "percentage_complete".into(),
fields: Some(vec![Field {
name: "lte".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
}]),
..Default::default()
},
Self::PercentageCompleteGTE(value) => Field {
name: "percentage_complete".into(),
fields: Some(vec![Field {
name: "gte".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
}]),
..Default::default()
},
Self::PercentageCompleteEquals(value) => Field {
name: "percentage_complete".into(),
fields: Some(vec![Field {
name: "equals".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
}]),
..Default::default()
},
Self::TaskCountLT(value) => Field {
name: "task_count".into(),
fields: Some(vec![Field {
@ -9427,7 +9403,6 @@ pub enum JobSetParam {
ClientId(i64),
Action(i64),
Status(i64),
PercentageComplete(i64),
TaskCount(i64),
CompletedTaskCount(i64),
DateCreated(chrono::DateTime<chrono::Utc>),
@ -9457,11 +9432,6 @@ impl JobSetParam {
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
},
Self::PercentageComplete(value) => Field {
name: "percentage_complete".into(),
value: Some(serde_json::to_value(value).unwrap()),
..Default::default()
},
Self::TaskCount(value) => Field {
name: "task_count".into(),
value: Some(serde_json::to_value(value).unwrap()),


@ -11,7 +11,7 @@ use uuid::Uuid;
#[ts(export)]
pub struct ClientState {
// client id is a uniquely generated UUID
pub client_id: String,
pub client_uuid: String,
// client_name is the name of the device running the client
pub client_name: String,
// the config path is stored so the struct can exist only in memory during startup and be written to disk later without supplying a path
@ -21,7 +21,7 @@ pub struct ClientState {
// all the libraries loaded by this client
pub libraries: Vec<LibraryState>,
// used to quickly find the default library
pub current_library_id: String,
pub current_library_uuid: String,
}
pub static CLIENT_STATE_CONFIG_NAME: &str = "client_state.json";
@ -29,7 +29,7 @@ pub static CLIENT_STATE_CONFIG_NAME: &str = "client_state.json";
#[derive(Debug, Serialize, Deserialize, Clone, Default, TS)]
#[ts(export)]
pub struct LibraryState {
pub library_id: String,
pub library_uuid: String,
pub library_path: String,
pub offline: bool,
}
@ -51,7 +51,7 @@ impl ClientState {
let uuid = Uuid::new_v4().to_string();
// create struct and assign defaults
let config = Self {
client_id: uuid,
client_uuid: uuid,
data_path: data_path.to_string(),
client_name: client_name.to_string(),
..Default::default()
@ -89,7 +89,11 @@ impl ClientState {
}
pub fn get_current_library(&self) -> LibraryState {
match self.libraries.iter().find(|lib| lib.library_id == self.current_library_id) {
match self
.libraries
.iter()
.find(|lib| lib.library_uuid == self.current_library_uuid)
{
Some(lib) => lib.clone(),
None => LibraryState::default(),
}


@ -97,7 +97,12 @@ pub async fn create_location(path: &str) -> Result<LocationResource, SysError> {
Err(e) => Err(LocationError::FileReadError(e))?,
}
// check if location already exists
let location = match db.location().find_first(vec![Location::path().equals(path.to_string())]).exec().await {
let location = match db
.location()
.find_first(vec![Location::path().equals(path.to_string())])
.exec()
.await
{
Some(location) => location,
None => {
info!("Location does not exist, creating new location for '{}'", &path);
@ -141,7 +146,7 @@ pub async fn create_location(path: &str) -> Result<LocationResource, SysError> {
let data = DotSpacedrive {
location_uuid: uuid.to_string(),
library_uuid: config.current_library_id,
library_uuid: config.current_library_uuid,
};
let json = match serde_json::to_string(&data) {


@ -1,37 +1,79 @@
import { ClientQuery, CoreResponse } from '@sd/core';
import { ClientCommand, ClientQuery, CoreResponse } from '@sd/core';
import { EventEmitter } from 'eventemitter3';
import { useQuery, UseQueryOptions, UseQueryResult } from 'react-query';
import {
useMutation,
useQuery,
UseQueryOptions,
UseQueryResult,
UseMutationOptions
} from 'react-query';
// global var to store the transport
export let transport: BaseTransport | null = null;
// applications utilizing this package should extend this class to instantiate a transport
export abstract class BaseTransport extends EventEmitter {
abstract send(query: ClientQuery): Promise<unknown>;
}
type KeyType = ClientQuery['key'];
type CQType<K> = Extract<ClientQuery, { key: K }>;
type CRType<K> = Extract<CoreResponse, { key: K }>;
type CQParams<CQ> = CQ extends { params: any } ? CQ['params'] : never;
type CRData<CR> = CR extends { data: any } ? CR['data'] : never;
export async function bridge<K extends KeyType, CQ extends CQType<K>, CR extends CRType<K>>(
key: K,
params?: CQParams<CQ>
): Promise<CRData<CR>> {
const result = (await transport?.send({ key, params } as any)) as any;
// console.log(`Client Query Transport: [${result?.key}]`, result?.data);
return result?.data;
abstract query(query: ClientQuery): Promise<unknown>;
abstract command(command: ClientCommand): Promise<unknown>;
}
export function setTransport(_transport: BaseTransport) {
transport = _transport;
}
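// Editorial sketch, not part of this commit: applications extend BaseTransport
// to supply a concrete transport. One could, for example, wrap Tauri's invoke
// API; the invoke command names used here are hypothetical.
import { invoke } from '@tauri-apps/api/tauri';

class TauriTransport extends BaseTransport {
	async query(query: ClientQuery): Promise<unknown> {
		return await invoke('client_query', { data: query });
	}
	async command(command: ClientCommand): Promise<unknown> {
		return await invoke('client_command', { data: command });
	}
}

setTransport(new TauriTransport());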
export function useBridgeQuery<K extends KeyType, CQ extends CQType<K>, CR extends CRType<K>>(
key: K,
params?: CQParams<CQ>,
options: UseQueryOptions<CRData<CR>> = {}
) {
return useQuery<CRData<CR>>([key, params], async () => await bridge(key, params), options);
// extract keys from generated Rust query/command types
type QueryKeyType = ClientQuery['key'];
type CommandKeyType = ClientCommand['key'];
// extract the type from the union
type CQType<K> = Extract<ClientQuery, { key: K }>;
type CCType<K> = Extract<ClientCommand, { key: K }>;
type CRType<K> = Extract<CoreResponse, { key: K }>;
// extract payload type
type ExtractParams<P> = P extends { params: any } ? P['params'] : never;
type ExtractData<D> = D extends { data: any } ? D['data'] : never;
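// Editorial illustration, not part of this commit: for concrete keys the
// helpers above resolve to the payload types of the generated unions.
type SysGetLocationParams = ExtractParams<CQType<'SysGetLocation'>>; // { id: bigint }
type ExplorerDirData = ExtractData<CRType<'LibGetExplorerDir'>>; // Directory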
// vanilla method to call the transport
export async function queryBridge<
K extends QueryKeyType,
CQ extends CQType<K>,
CR extends CRType<K>
>(key: K, params?: ExtractParams<CQ>): Promise<ExtractData<CR>> {
const result = (await transport?.query({ key, params } as any)) as any;
return result?.data;
}
export async function commandBridge<
K extends CommandKeyType,
CC extends CCType<K>,
CR extends CRType<K>
>(key: K, params?: ExtractParams<CC>): Promise<ExtractData<CR>> {
const result = (await transport?.command({ key, params } as any)) as any;
return result?.data;
}
// react-query method to call the transport
export function useBridgeQuery<K extends QueryKeyType, CQ extends CQType<K>, CR extends CRType<K>>(
key: K,
params?: ExtractParams<CQ>,
options: UseQueryOptions<ExtractData<CR>> = {}
) {
return useQuery<ExtractData<CR>>(
[key, params],
async () => await queryBridge(key, params),
options
);
}
export function useBridgeCommand<
K extends CommandKeyType,
CC extends CCType<K>,
CR extends CRType<K>
>(key: K, params?: ExtractParams<CC>, options: UseMutationOptions<ExtractData<CR>> = {}) {
return useMutation<ExtractData<CR>>(
[key, params],
async () => await commandBridge(key, params),
options
);
}
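Together these hooks give React components typed access to the core over whatever transport was registered. A short usage sketch (editorial, not part of this commit; the component, tag values, and import path are hypothetical, while the keys come from the generated bindings):

import { useBridgeCommand, useBridgeQuery } from '@sd/client';

function JobsPanel() {
	// typed query: data is inferred as Array<JobResource>
	const { data: runningJobs } = useBridgeQuery('JobGetRunning');
	// typed command: params are checked against the TagCreate variant
	const { mutate: createTag } = useBridgeCommand('TagCreate', {
		name: 'Screenshots',
		color: '#3A7ECF'
	});
	return null;
}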


@ -1,7 +1,7 @@
import { useQuery } from 'react-query';
import { useState } from 'react';
import { useFileExplorerState } from './state';
import { bridge } from '../bridge';
import { useBridgeCommand, useBridgeQuery } from '../bridge';
// this hook initializes the explorer state and queries the core
export function useFileExplorer(initialPath = '/', initialLocation: number | null = null) {
@ -12,16 +12,7 @@ export function useFileExplorer(initialPath = '/', initialLocation: number | null
// const { data: volumes } = useQuery(['sys_get_volumes'], () => bridge('sys_get_volumes'));
const { data: location } = useQuery(['SysGetLocations', { id: locationId }], () => {
return bridge('SysGetLocations', { id: locationId });
});
// for this to work we need a function with types that knows all the commands and their arguments
// this can be generated by the core, entirely Tauri agnostic, core can be initialized with a Tauri instance to act as a bridge.
const { data: files } = useQuery(['LibGetExplorerDir', path], async () => {
return await bridge('LibGetExplorerDir', { limit: fileState.row_limit, path });
});
return { location, files, setPath, setLocationId };
return { setPath, setLocationId };
}
// export function useVolumes() {
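As of this commit the hook only exposes navigation state; a consuming component would look roughly like this (editorial sketch; the component name is hypothetical):

function ExplorerView() {
	const { setPath, setLocationId } = useFileExplorer('/');
	// handlers a file grid could call to navigate
	const openLocation = (id: number) => setLocationId(id);
	const openDirectory = (dir: string) => setPath(dir);
	return null;
}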