custom migrations LMAO

Co-authored-by: Brendan Allan <brendonovich@outlook.com>
Jamie 2022-03-03 03:39:09 -08:00
parent 3a5059107a
commit 53637205d9
16 changed files with 14575 additions and 277 deletions

File diff suppressed because it is too large.


@@ -18,7 +18,7 @@ swift-rs = "0.2.3"
 tauri = { git = "https://github.com/tauri-apps/tauri", branch = "next", features = ["api-all"] }
 swift-rs = "0.2.3"
 sdcorelib = { path = "../../../packages/core" }
-tauri-plugin-shadows = { git = "https://github.com/tauri-apps/tauri-plugin-shadows", features = ["tauri-impl"] }
+# tauri-plugin-shadows = { git = "https://github.com/tauri-apps/tauri-plugin-shadows", features = ["tauri-impl"] }

 # Universal Dependencies
 anyhow = "1.0.44"
@@ -37,7 +37,7 @@ sha256 = "1.0.2"
 once_cell = "1.8.0"
 int-enum = "0.4.0"
 async-std = "1.10.0"
-tokio = {version = "1.15.0", features= ["sync"] }
+tokio = { version = "1.17.0", features = ["sync"] }

 [features]
 default = [ "custom-protocol" ]


@@ -1,17 +1,18 @@
 use sdcorelib;
 use tauri::api::path;
 use tauri::Manager;
-use tauri_plugin_shadows::Shadows;
+// use tauri_plugin_shadows::Shadows;

 mod commands;
 mod menu;

-fn main() {
+#[tokio::main]
+async fn main() {
+	let data_dir = path::data_dir().unwrap_or(std::path::PathBuf::from("./"));
+	let mut core_receiver = sdcorelib::configure(data_dir).await;
 	tauri::Builder::default()
 		.setup(|app| {
-			let data_dir = path::data_dir().unwrap_or(std::path::PathBuf::from("./"));
-			let mut core_receiver = sdcorelib::configure(data_dir);
 			let app = app.handle();
 			tauri::async_runtime::spawn(async move {


@@ -1 +1,2 @@
 /target
+*.db*

packages/core/Cargo.lock (generated, 2024 lines): file diff suppressed because it is too large.


@@ -33,6 +33,7 @@ rusqlite = "0.25.3"
 refinery = { version = "0.6.0", features = ["rusqlite"] }
 sqlx = { version = "0.5.7", features = ["sqlite"] }
 sea-orm = { version = "^0.6.0", features = [ "sqlx-sqlite", "runtime-async-std-rustls", "macros", "debug-print"], default-features = false }
+prisma-client-rust = { git = "https://github.com/Brendonovich/prisma-client-rust.git", branch = "master" }
 walkdir = "^2.3.2"
 bytesize = "1.1.0"
 env_logger = "0.9.0"
@@ -43,4 +44,5 @@ uuid = "0.8"
 thumbnailer = "0.4.0"
 mime = "0.3.16"
-tokio = {version = "1.15.0", features=["sync"]}
+tokio = { version = "1.17.0", features = ["sync", "rt"] }
 include_dir = {version = "0.7.2", features = ["glob"]}


@@ -0,0 +1,117 @@
-- CreateTable
CREATE TABLE "libraries" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"uuid" TEXT NOT NULL,
"name" TEXT NOT NULL,
"remote_id" TEXT,
"is_primary" BOOLEAN NOT NULL DEFAULT true,
"encryption" INTEGER NOT NULL DEFAULT 0,
"total_file_count" INTEGER NOT NULL DEFAULT 0,
"total_bytes_used" TEXT NOT NULL DEFAULT '0',
"total_byte_capacity" TEXT NOT NULL DEFAULT '0',
"total_unique_bytes" TEXT NOT NULL DEFAULT '0',
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"timezone" TEXT
);
-- CreateTable
CREATE TABLE "clients" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"uuid" TEXT NOT NULL,
"name" TEXT NOT NULL,
"platform" INTEGER NOT NULL DEFAULT 0,
"version" TEXT,
"online" BOOLEAN DEFAULT true,
"last_seen" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"timezone" TEXT,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateTable
CREATE TABLE "locations" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT,
"path" TEXT,
"total_capacity" INTEGER,
"available_capacity" INTEGER,
"is_removable" BOOLEAN NOT NULL DEFAULT true,
"is_ejectable" BOOLEAN NOT NULL DEFAULT true,
"is_root_filesystem" BOOLEAN NOT NULL DEFAULT true,
"is_online" BOOLEAN NOT NULL DEFAULT true,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateTable
CREATE TABLE "files" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"is_dir" BOOLEAN NOT NULL DEFAULT false,
"location_id" INTEGER NOT NULL,
"materialized_path" TEXT NOT NULL,
"name" TEXT NOT NULL,
"extension" TEXT,
"path_integrity_hash" TEXT NOT NULL,
"quick_integrity_hash" TEXT,
"full_integrity_hash" TEXT,
"size_in_bytes" TEXT NOT NULL,
"encryption" INTEGER NOT NULL DEFAULT 0,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_indexed" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"ipfs_id" TEXT,
"parent_id" INTEGER,
CONSTRAINT "files_location_id_fkey" FOREIGN KEY ("location_id") REFERENCES "locations" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION,
CONSTRAINT "files_parent_id_fkey" FOREIGN KEY ("parent_id") REFERENCES "files" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
-- CreateTable
CREATE TABLE "tags" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT,
"encryption" INTEGER DEFAULT 0,
"total_files" INTEGER DEFAULT 0,
"redundancy_goal" INTEGER DEFAULT 1,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateTable
CREATE TABLE "tags_on_files" (
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"tag_id" INTEGER NOT NULL,
"file_id" INTEGER NOT NULL,
PRIMARY KEY ("tag_id", "file_id"),
CONSTRAINT "tags_on_files_file_id_fkey" FOREIGN KEY ("file_id") REFERENCES "files" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION,
CONSTRAINT "tags_on_files_tag_id_fkey" FOREIGN KEY ("tag_id") REFERENCES "tags" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION
);
-- CreateTable
CREATE TABLE "jobs" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"client_id" INTEGER NOT NULL,
"action" INTEGER NOT NULL,
"status" INTEGER NOT NULL DEFAULT 0,
"percentage_complete" INTEGER NOT NULL DEFAULT 0,
"task_count" INTEGER NOT NULL DEFAULT 1,
"completed_task_count" INTEGER NOT NULL DEFAULT 0,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "jobs_client_id_fkey" FOREIGN KEY ("client_id") REFERENCES "clients" ("id") ON DELETE NO ACTION ON UPDATE NO ACTION
);
-- CreateTable
CREATE TABLE "spaces" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT NOT NULL,
"encryption" INTEGER DEFAULT 0,
"date_created" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"date_modified" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"libraryId" INTEGER,
CONSTRAINT "spaces_libraryId_fkey" FOREIGN KEY ("libraryId") REFERENCES "libraries" ("id") ON DELETE SET NULL ON UPDATE CASCADE
);
-- CreateIndex
CREATE UNIQUE INDEX "clients_uuid_key" ON "clients"("uuid");
-- CreateIndex
CREATE UNIQUE INDEX "files_path_integrity_hash_key" ON "files"("path_integrity_hash");


@@ -0,0 +1,3 @@
# Please do not edit this file manually
# It should be added in your version-control system (i.e. Git)
provider = "sqlite"


@@ -0,0 +1,10 @@
-- CreateTable
CREATE TABLE "_migrations" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"name" TEXT NOT NULL,
"checksum" TEXT NOT NULL,
"steps_applied" INTEGER NOT NULL DEFAULT 0,
"applied_at" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- CreateIndex
CREATE UNIQUE INDEX "_migrations_checksum_key" ON "_migrations"("checksum");


@@ -1,6 +1,21 @@
 datasource db {
   provider = "sqlite"
-  url      = "file:/Users/jamie/Library/Application Support/spacedrive/library.db"
+  url      = "file:dev.db"
 }

+generator client {
+  provider = "prisma-client-rust"
+  output   = "../src/prisma.rs"
+}
+
+model Migration {
+  id            Int      @id @default(autoincrement())
+  name          String
+  checksum      String   @unique
+  steps_applied Int      @default(0)
+  applied_at    DateTime @default(now())
+
+  @@map("_migrations")
+}
+
 model Library {
@@ -23,7 +38,7 @@ model Library {
 model Client {
   id       Int     @id @default(autoincrement())
-  uuid     String  @unique()
+  uuid     String  @unique
   name     String
   platform Int     @default(0)
   version  String?
@@ -53,53 +68,57 @@ model Location {
 }

 model File {
-  id                          Int       @id @default(autoincrement())
-  is_dir                      Boolean   @default(false)
-  location_id                 Int
-  materialized_path           String
-  name                        String
-  extension                   String?
-  meta_integrity_hash         String    @unique()
-  sampled_byte_integrity_hash String?
-  byte_integrity_hash         String?
-  size_in_bytes               String
-  encryption                  Int       @default(0)
-  ipfs_id                     String?
-  date_created                DateTime  @default(now())
-  date_modified               DateTime  @default(now())
-  date_indexed                DateTime  @default(now())
-  directory_id                Int?
-  parent_id                   Int?
-  locations                   Location? @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
-  files                       File?     @relation("file_to_file_parent_id", fields: [parent_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
-  other_files                 File[]    @relation("file_to_file_parent_id")
-  tags_files                  TagFile[] @ignore
+  id                   Int         @id @default(autoincrement())
+  is_dir               Boolean     @default(false)
+  location_id          Int
+  materialized_path    String
+  name                 String
+  extension            String?
+  path_integrity_hash  String      @unique // combo of location_id, materialized_path, name, extension
+  quick_integrity_hash String? // 100 * 100 byte samples
+  full_integrity_hash  String? // full byte to byte hash
+  size_in_bytes        String
+  encryption           Int         @default(0)
+  date_created         DateTime    @default(now())
+  date_modified        DateTime    @default(now())
+  date_indexed         DateTime    @default(now())
+  ipfs_id              String?
+  location             Location?   @relation(fields: [location_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
+  parent               File?       @relation("directory_files", fields: [parent_id], references: [id])
+  parent_id            Int?
+  children             File[]      @relation("directory_files")
+  file_tags            TagOnFile[]

   @@map("files")
 }

 model Tag {
   id              Int         @id @default(autoincrement())
   name            String?
   encryption      Int?        @default(0)
   total_files     Int?        @default(0)
   redundancy_goal Int?        @default(1)
   date_created    DateTime    @default(now())
   date_modified   DateTime    @default(now())
-  tags_files      TagFile[]   @ignore
+  tag_files       TagOnFile[]

   @@map("tags")
 }

-model TagFile {
-  tag_id       Int
-  file_id      Int
+model TagOnFile {
   date_created DateTime @default(now())
-  file         File     @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
-  tag          Tag      @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
+
+  tag_id Int
+  tag    Tag @relation(fields: [tag_id], references: [id], onDelete: NoAction, onUpdate: NoAction)
+
+  file_id Int
+  file    File @relation(fields: [file_id], references: [id], onDelete: NoAction, onUpdate: NoAction)

   @@id([tag_id, file_id])
-  @@map("tags_files")
+  @@map("tags_on_files")
 }

 model Job {
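The comments on the new File fields document what each hash covers; the actual derivation is not part of this commit, so the following is only a plausible sketch of combining the four fields the `path_integrity_hash` comment names (helper name, separator, and encoding are hypothetical):

use data_encoding::HEXLOWER;
use ring::digest::{Context, SHA256};

// Hypothetical: join location_id, materialized_path, name and extension,
// then hash, per the schema comment on path_integrity_hash.
fn path_integrity_hash(location_id: i32, materialized_path: &str, name: &str, extension: Option<&str>) -> String {
	let combo = format!("{}:{}:{}:{}", location_id, materialized_path, name, extension.unwrap_or(""));
	let mut ctx = Context::new(&SHA256);
	ctx.update(combo.as_bytes());
	HEXLOWER.encode(ctx.finish().as_ref())
}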


@@ -1,8 +1,14 @@
+use crate::file::checksum::sha256_digest;
+use crate::prisma::{Migration, PrismaClient};
 use crate::state::{self};
 use anyhow::Result;
+use data_encoding::HEXLOWER;
+use include_dir::{include_dir, Dir};
 use once_cell::sync::OnceCell;
 use rusqlite::Connection;
 use sea_orm::{Database, DatabaseConnection};
 use sha256::digest;
+use std::ffi::OsStr;
+use std::io::{BufReader, Read};

 pub static DB: OnceCell<DatabaseConnection> = OnceCell::new();

@@ -23,6 +29,10 @@ pub async fn db() -> Result<&'static DatabaseConnection, String> {
 			.unwrap();

 		DB.set(db).unwrap_or_default();
+
+		// TODO: Error handling when brendan adds it to prisma-client-rust
+		// let client = PrismaClient::new_with_url(&format!("file:{}", &path)).await;
+		// DB.set(client).unwrap_or_default();

 		Ok(DB.get().unwrap())
 	} else {
@@ -30,21 +40,136 @@ pub async fn db() -> Result<&'static DatabaseConnection, String> {
 	}
 }

-pub async fn init(db_url: &str) -> Result<(), sqlx::Error> {
-	// establish connection, this is only used to create the db if missing
-	// replace in future
-	let mut connection = Connection::open(&db_url).unwrap();
+const INIT_MIGRATION: &str = include_str!("../../prisma/migrations/migration_table/migration.sql");
+static MIGRATIONS_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/prisma/migrations");

-	// migrate db
-	mod embedded_primary {
-		use refinery::embed_migrations;
-		embed_migrations!("src/db/migrations");
-	}
+pub async fn init(db_url: &str) -> Result<(), sqlx::Error> {
+	let client = PrismaClient::new_with_url(&format!("file:{}", &db_url)).await;
+
+	match client
+		._query_raw::<serde_json::Value>(
+			"SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
+		)
+		.await
+	{
+		Ok(data) => {
+			if data.len() == 0 {
+				println!("Migration table does not exist");
+				client._execute_raw(INIT_MIGRATION).await;
+
+				let value: Vec<serde_json::Value> = client
+					._query_raw(
+						"SELECT name FROM sqlite_master WHERE type='table' AND name='_migrations'",
+					)
+					.await
+					.unwrap();
+
+				println!("Migration table created: {:?}", value);
+			} else {
+				println!("Migration table exists: {:?}", data);
+			}
+
+			let mut migration_subdirs = MIGRATIONS_DIR
+				.dirs()
+				.filter(|subdir| {
+					subdir
+						.path()
+						.file_name()
+						.map(|name| name != OsStr::new("migration_table"))
+						.unwrap_or(false)
+				})
+				.collect::<Vec<_>>();
+
+			migration_subdirs.sort_by(|a, b| {
+				let a_name = a.path().file_name().unwrap().to_str().unwrap();
+				let b_name = b.path().file_name().unwrap().to_str().unwrap();
+
+				let a_time = a_name[..15].parse::<i64>().unwrap();
+				let b_time = b_name[..15].parse::<i64>().unwrap();
+
+				a_time.cmp(&b_time)
+			});
+
+			for subdir in migration_subdirs {
+				println!("{:?}", subdir.path());
+				let migration_file = subdir
+					.get_file(subdir.path().join("./migration.sql"))
+					.unwrap();
+				let migration_sql = migration_file.contents_utf8().unwrap();
+
+				let digest = sha256_digest(BufReader::new(migration_file.contents()))?;
+				// create a lowercase hash from the digest
+				let checksum = HEXLOWER.encode(digest.as_ref());
+				let name = subdir.path().file_name().unwrap().to_str().unwrap();
+
+				// get existing migration by checksum, if it doesn't exist run the migration
+				let existing_migration = client
+					.migration()
+					.find_unique(Migration::checksum().equals(checksum.clone()))
+					.exec()
+					.await;
+
+				if existing_migration.is_none() {
+					println!("Running migration: {}", name);
+
+					let steps = migration_sql.split(";").collect::<Vec<&str>>();
+					let steps = &steps[0..steps.len() - 1];
+
+					client
+						.migration()
+						.create_one(
+							Migration::name().set(name.to_string()),
+							Migration::checksum().set(checksum.clone()),
+							vec![],
+						)
+						.exec()
+						.await;
+
+					for (i, step) in steps.iter().enumerate() {
+						match client._execute_raw(&format!("{};", step)).await {
+							Ok(_) => {
+								println!("Step {} ran successfully", i);
+								client
+									.migration()
+									.find_unique(Migration::checksum().equals(checksum.clone()))
+									.update(vec![Migration::steps_applied().set(i as i64 + 1)])
+									.exec()
+									.await;
+							}
+							Err(e) => {
+								println!("Error running migration: {}", name);
+								println!("{}", e);
+								break;
+							}
+						}
+					}
+
+					println!("Migration {} recorded successfully", name);
+				} else {
+					println!("Migration {} already exists", name);
+				}
+			}
+		}
+		Err(err) => {
+			panic!("Failed to check migration table existence: {:?}", err);
+		}
+	}

-	embedded_primary::migrations::runner()
-		.run(&mut connection)
-		.unwrap();
+	// // establish connection, this is only used to create the db if missing
+	// // replace in future
+	// let mut connection = Connection::open(&db_url).unwrap();

-	connection.close().unwrap();
+	// // migrate db
+	// mod embedded_primary {
+	// 	use refinery::embed_migrations;
+	// 	embed_migrations!("src/db/migrations");
+	// }
+
+	// embedded_primary::migrations::runner()
+	// 	.run(&mut connection)
+	// 	.unwrap();
+
+	// connection.close().unwrap();

 	Ok(())
 }
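Worth noting in the runner above: statements are applied one at a time and `steps_applied` is bumped after each success, so a failed migration leaves a record of how far it got. A toy illustration of the semicolon splitting (which assumes no semicolons inside string literals):

fn main() {
	let migration_sql = "CREATE TABLE a (id INTEGER);\nCREATE TABLE b (id INTEGER);\n";
	// split on ';' and drop the trailing fragment after the last statement
	let steps: Vec<&str> = migration_sql.split(';').collect();
	let steps = &steps[..steps.len() - 1];
	for (i, step) in steps.iter().enumerate() {
		// the runner re-terminates each fragment before executing: format!("{};", step)
		println!("step {}: {};", i, step.trim());
	}
}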


@@ -5,7 +5,7 @@ use std::io;
 use std::io::{BufReader, Read};
 use std::time::Instant;

-fn sha256_digest<R: Read>(mut reader: R) -> io::Result<Digest> {
+pub fn sha256_digest<R: Read>(mut reader: R) -> io::Result<Digest> {
 	let mut context = Context::new(&SHA256);
 	let mut buffer = [0; 1024];

 	loop {


@@ -4,7 +4,9 @@ pub mod file;
 pub mod library;
 pub mod native;
 pub mod state;
+pub mod tx;
 // pub mod p2p;
+pub mod prisma;
 pub mod util;

 use futures::executor::block_on;

@@ -59,7 +61,7 @@ pub async fn core_send_stream<T: Stream<Item = ClientEvent>>(stream: T) {
 		.await;
 }

-pub fn configure(mut data_dir: std::path::PathBuf) -> mpsc::Receiver<ClientEvent> {
+pub async fn configure(mut data_dir: std::path::PathBuf) -> mpsc::Receiver<ClientEvent> {
 	data_dir = data_dir.join("spacedrive");

 	let (event_sender, event_receiver) = mpsc::channel(100);
@@ -81,42 +83,41 @@ pub fn configure(mut data_dir: std::path::PathBuf) -> mpsc::Receiver<ClientEvent
 	client_config.save();

 	// begin asynchronous startup routines
-	block_on(async {
-		println!("Starting up... {:?}", client_config);
-		if client_config.libraries.len() == 0 {
-			match library::loader::create(None).await {
-				Ok(library) => {
-					println!("Created new library: {:?}", library);
-				}
-				Err(e) => {
-					println!("Error creating library: {:?}", e);
-				}
-			}
-		} else {
-			for library in client_config.libraries.iter() {
-				// init database for library
-				match library::loader::load(&library.library_path, &library.library_id).await {
-					Ok(library) => {
-						println!("Created new library: {:?}", library);
-					}
-					Err(e) => {
-						println!("Error creating library: {:?}", e);
-					}
-				}
-			}
-		}
-		// init client
-		match library::client::create().await {
-			Ok(_) => {}
-			Err(e) => {
-				println!("Error initializing client: {:?}", e);
-			}
-		};
-		// activate p2p listeners
-		// p2p::listener::listen(None);
-	});
+	println!("Starting up... {:?}", client_config);
+	if client_config.libraries.len() == 0 {
+		match library::loader::create(None).await {
+			Ok(library) => {
+				println!("Created new library: {:?}", library);
+			}
+			Err(e) => {
+				println!("Error creating library: {:?}", e);
+			}
+		}
+	} else {
+		for library in client_config.libraries.iter() {
+			// init database for library
+			match library::loader::load(&library.library_path, &library.library_id).await {
+				Ok(library) => {
+					println!("Loaded library: {:?}", library);
+				}
+				Err(e) => {
+					println!("Error loading library: {:?}", e);
+				}
+			}
+		}
+	}
+
+	// init client
+	match library::client::create().await {
+		Ok(_) => {}
+		Err(e) => {
+			println!("Error initializing client: {:?}", e);
+		}
+	};
+
+	// activate p2p listeners
+	// p2p::listener::listen(None);

 	println!("Spacedrive online");

 	// env_logger::builder()

packages/core/src/prisma.rs (new file, 10392 lines): file diff suppressed because it is too large.


@@ -1,52 +1,35 @@
 use chrono::{DateTime, Utc};
+use prisma_client_rust::SerializeQuery;
 use serde::{Deserialize, Serialize};
 use uuid::Uuid;

+use crate::state;
+
 // an SQL commit to be sent to connected clients
 #[derive(Serialize, Deserialize)]
 pub struct Commit {
-	pub commit_type: CommitType,
-	pub client_id: u32,
-	pub library_id: u32,
 	pub id: String,
 	pub timestamp: DateTime<Utc>,
-	pub sql: Option<String>,
-}
-
-enum CommitType {
-	Create,
-	Mutate,
-	Delete,
+	pub client_uuid: String,
+	pub library_uuid: String,
+	pub sql: String,
 }

 impl Commit {
-	pub fn new(commit_type: CommitType, sql: Option<String>) -> Self {
-		Self { commit_type, sql }
+	pub fn new(sql: String) -> Self {
+		let client = state::client::get();
+		let id = Uuid::new_v4().to_string();
+		let timestamp = Utc::now();
+		Self {
+			id,
+			sql,
+			client_uuid: client.client_id,
+			library_uuid: client.current_library_id,
+			timestamp,
+		}
 	}

 	pub fn from_query<T: SerializeQuery>(query: T) -> Self {
-		Self::new(CommitType::Mutate, query.serialize_query())
+		Self::new(query.serialize_query())
 	}
 }
-
-struct RawQuery(String);
-
-trait SerializeQuery {
-	fn serialize_query(self) -> String;
-}
-
-struct PostFindMany {
-	query: String,
-}
-
-impl SerializeQuery for PostFindUnique {
-	fn serialize_query(self) -> String {
-		RawQuery(self.query)
-	}
-}
-
-fn main() {
-	// example
-	Commit::from_query(
-		client
-			.post()
-			.find_unique(Post::id().equals("post0".to_string()))
-			.with(vec![Post::user().fetch()]),
-	);
-}


@@ -0,0 +1 @@
// pub mod commit;