Compare commits

...

11 commits

Author SHA1 Message Date
Ericson "Fogo" Soares f4413a4dc3
Merge cee3389eb7 into 4735adcb66 2024-06-26 18:02:04 -03:00
Matthew Yung 4735adcb66
fixed issue with tag assign mode (#2570) 2024-06-26 20:35:53 +00:00
Arnab Chakraborty aaf1f237cd
[MOB-72] Tracing logs not working on Android (#2569)
* Logging to logcat instead of logfile

Hopefully works now?

* Lock dep for Android builds only

* Don't want only debug logs lol

* Update lib.rs
2024-06-26 20:20:16 +00:00
Ericson Soares cee3389eb7 Stronger lint on sync subcrate 2024-06-20 18:54:14 -03:00
Ericson Soares aa9a117a22 Stronger linter on sync generator 2024-06-20 17:03:39 -03:00
Ericson Soares 6cdaed47ec Trying to avoid data loss on actor stop 2024-06-20 15:48:17 -03:00
Vítor Vasconcellos f72166fe70
Comment out background_processing_percentage on frontend too
- Update rust version in contributing
2024-06-20 01:13:50 -03:00
Vítor Vasconcellos 7f6f6d2067
Auto format 2024-06-20 01:00:39 -03:00
Vítor Vasconcellos 7ea836ebb2
Update core.ts 2024-06-20 00:47:22 -03:00
Vítor Vasconcellos 3f05cec895
Merge remote-tracking branch 'origin/main' into eng-1793-introduce-error-handling-for-sd-core-sync-crate 2024-06-20 00:40:33 -03:00
Ericson Soares 128b38c007 Error handling for sd-core-sync crate
Also a bunch of tweaks and fixes
2024-06-18 23:21:12 -03:00
57 changed files with 2378 additions and 1602 deletions

.vscode/tasks.json vendored (73 changed lines)
View file

@@ -60,7 +60,6 @@
],
"env": {
"RUST_BACKTRACE": "short"
// "RUST_LOG": "sd_core::invalidate-query=trace"
},
"problemMatcher": ["$rustc"],
"group": "build",
@@ -78,6 +77,78 @@
"group": "build",
"label": "rust: run spacedrive release",
"dependsOn": ["ui:build"]
},
{
"type": "cargo",
"command": "test",
"args": [
"--package",
"sd-core-sync",
"--test",
"lib",
"--",
"writes_operations_and_rows_together",
"--exact",
"--show-output"
],
"env": {
"RUST_BACKTRACE": "short",
"BROWSER": "open",
"COMMAND_MODE": "unix2003",
"EDITOR": "vim",
"GOPATH": "/Users/ericson/.go",
"GREP_COLOR": "37;45",
"GREP_COLORS": "mt=37;45",
"HOME": "/Users/ericson",
"HOMEBREW_CELLAR": "/opt/homebrew/Cellar",
"HOMEBREW_PREFIX": "/opt/homebrew",
"HOMEBREW_REPOSITORY": "/opt/homebrew",
"INFOPATH": "/opt/homebrew/share/info:/usr/local/share/info:",
"LANG": "en_US.UTF-8",
"LDFLAGS": "-L/opt/homebrew/opt/llvm/lib/c++ -Wl,-rpath,/opt/homebrew/opt/llvm/lib/c++",
"LESS": "-g -i -M -R -S -w -X -z-4",
"LESS_TERMCAP_mb": "\u001b[01;31m",
"LESS_TERMCAP_md": "\u001b[01;31m",
"LESS_TERMCAP_me": "\u001b[0m",
"LESS_TERMCAP_se": "\u001b[0m",
"LESS_TERMCAP_so": "\u001b[00;47;30m",
"LESS_TERMCAP_ue": "\u001b[0m",
"LESS_TERMCAP_us": "\u001b[01;32m",
"LOGNAME": "ericson",
"LSCOLORS": "exfxcxdxbxGxDxabagacad",
"MANPATH": "/opt/homebrew/share/man:/usr/local/share/man:::",
"MallocNanoZone": "0",
"ORIGINAL_XDG_CURRENT_DESKTOP": "undefined",
"P9K_SSH": "0",
"PAGER": "less",
"PATH": "/usr/local/opt/openjdk/bin:/usr/local/opt/game-porting-toolkit/bin:/opt/homebrew/bin:/opt/homebrew/sbin:/Users/ericson/.pnpm:/usr/local/bin:/usr/local/sbin:/opt/homebrew/bin:/opt/homebrew/sbin:/usr/local/bin:/usr/local/sbin:/System/Cryptexes/App/usr/bin:/usr/bin:/bin:/usr/sbin:/sbin:/var/run/com.apple.security.cryptexd/codex.system/bootstrap/usr/local/bin:/var/run/com.apple.security.cryptexd/codex.system/bootstrap/usr/bin:/var/run/com.apple.security.cryptexd/codex.system/bootstrap/usr/appleinternal/bin:/Library/Apple/usr/bin:/usr/local/share/dotnet:~/.dotnet/tools:/Library/Frameworks/Mono.framework/Versions/Current/Commands:/Users/ericson/.cargo/bin:/Users/ericson/Library/Application Support/JetBrains/Toolbox/scripts:/Users/ericson/.local/bin:/Users/ericson/.go/bin:/Users/ericson/.local/share/containers/podman-desktop/extensions-storage/podman-desktop.compose/bin",
"PNPM_HOME": "/Users/ericson/.pnpm",
"PWD": "/",
"SHELL": "/bin/zsh",
"SHLVL": "0",
"SSH_AUTH_SOCK": "/private/tmp/com.apple.launchd.a62yq49fKe/Listeners",
"TMPDIR": "/var/folders/k5/pvf6cfbd05s_prpwdl0h03rr0000gn/T/",
"USER": "ericson",
"VISUAL": "vim",
"VSCODE_AMD_ENTRYPOINT": "vs/workbench/api/node/extensionHostProcess",
"VSCODE_CODE_CACHE_PATH": "/Users/ericson/Library/Application Support/Code/CachedData/611f9bfce64f25108829dd295f54a6894e87339d",
"VSCODE_CRASH_REPORTER_PROCESS_TYPE": "extensionHost",
"VSCODE_CWD": "/",
"VSCODE_HANDLES_UNCAUGHT_ERRORS": "true",
"VSCODE_IPC_HOOK": "/Users/ericson/Library/Application Support/Code/1.90-main.sock",
"VSCODE_NLS_CONFIG": "{\"locale\":\"pt-br\",\"osLocale\":\"pt-br\",\"availableLanguages\":{},\"_languagePackSupport\":true}",
"VSCODE_PID": "79712",
"XPC_FLAGS": "0x0",
"XPC_SERVICE_NAME": "application.com.microsoft.VSCode.81888144.81888150",
"_": "/Applications/Visual Studio Code.app/Contents/MacOS/Electron",
"__CFBundleIdentifier": "com.microsoft.VSCode",
"__CF_USER_TEXT_ENCODING": "0x1F5:0x0:0x47",
"ELECTRON_RUN_AS_NODE": "1",
"VSCODE_L10N_BUNDLE_LOCATION": ""
},
"problemMatcher": ["$rustc"],
"group": "build",
"label": "rust: test writes_operations_and_rows_together"
}
]
}
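
For reference, the task added above is equivalent to running `cargo test --package sd-core-sync --test lib -- writes_operations_and_rows_together --exact --show-output` from the workspace root; the large `env` block appears to be a snapshot of the author's local shell environment rather than values the test itself requires.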

View file

@@ -89,7 +89,7 @@ To run the landing page:
If you encounter any issues, ensure that you are using the following versions of Rust, Node and Pnpm:
- Rust version: **1.78**
- Rust version: **1.79**
- Node version: **18.18**
- Pnpm version: **9.1.1**

Cargo.lock generated (31 changed lines)
View file

@@ -132,6 +132,12 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
[[package]]
name = "android_log-sys"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85965b6739a430150bdd138e2374a98af0c3ee0d030b3bb7fc3bddff58d0102e"
[[package]]
name = "android_system_properties"
version = "0.1.5"
@@ -8882,8 +8888,11 @@ dependencies = [
name = "sd-actors"
version = "0.1.0"
dependencies = [
"async-channel",
"futures",
"pin-project-lite",
"tokio",
"tracing",
]
[[package]]
@@ -9024,6 +9033,7 @@ dependencies = [
"tokio-util",
"tower-service",
"tracing",
"tracing-android",
"tracing-appender",
"tracing-subscriber",
"tracing-test",
@@ -9040,6 +9050,7 @@ dependencies = [
"prisma-client-rust",
"regex",
"sd-core-prisma-helpers",
"sd-core-sync",
"sd-prisma",
"sd-utils",
"serde",
@@ -9134,17 +9145,24 @@ dependencies = [
name = "sd-core-sync"
version = "0.0.0"
dependencies = [
"async-channel",
"futures",
"futures-concurrency",
"prisma-client-rust",
"rmp-serde",
"rmpv",
"rspc",
"sd-actors",
"sd-prisma",
"sd-sync",
"sd-utils",
"serde",
"serde_json",
"thiserror",
"tokio",
"tracing",
"tracing-subscriber",
"tracing-test",
"uhlc",
"uuid",
]
@@ -9463,7 +9481,7 @@ dependencies = [
"futures",
"futures-concurrency",
"lending-stream",
"pin-project",
"pin-project-lite",
"rand 0.8.5",
"rmp-serde",
"serde",
@@ -11239,6 +11257,17 @@ dependencies = [
"tracing-core",
]
[[package]]
name = "tracing-android"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12612be8f868a09c0ceae7113ff26afe79d81a24473a393cb9120ece162e86c0"
dependencies = [
"android_log-sys",
"tracing",
"tracing-subscriber",
]
[[package]]
name = "tracing-appender"
version = "0.2.3"

View file

@@ -159,6 +159,9 @@ icrate = { version = "0.1.2", features = [
"Foundation_NSNumber",
] }
[target.'cfg(target_os = "android")'.dependencies]
tracing-android = "0.2.0"
[dev-dependencies]
# Workspace dependencies
globset = { workspace = true }
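
The `tracing-android` dependency above is gated to Android targets, matching the MOB-72 commit that routes logs to logcat. A minimal sketch of wiring such a layer, assuming tracing-android's `layer` constructor and a hypothetical tag string; this is not necessarily how sd-core initializes its subscriber:

use tracing_subscriber::prelude::*;

// Sketch: send tracing events to logcat on Android, stderr elsewhere.
// The "com.spacedrive" tag is an assumption for illustration.
pub fn init_logging() {
    let registry = tracing_subscriber::registry();

    #[cfg(target_os = "android")]
    registry
        .with(tracing_android::layer("com.spacedrive").expect("failed to build logcat layer"))
        .init();

    #[cfg(not(target_os = "android"))]
    registry.with(tracing_subscriber::fmt::layer()).init();
}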

View file

@@ -11,6 +11,7 @@ edition = { workspace = true }
[dependencies]
# Inner Core Sub-crates
sd-core-prisma-helpers = { path = "../prisma-helpers" }
sd-core-sync = { path = "../sync" }
# Spacedrive Sub-crates
sd-prisma = { path = "../../../crates/prisma" }

View file

@@ -173,6 +173,8 @@ pub enum FilePathError {
NonUtf8Path(#[from] NonUtf8PathError),
#[error("received an invalid filename and extension: <filename_and_extension='{0}'>")]
InvalidFilenameAndExtension(String),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
#[must_use]

View file

@@ -51,6 +51,8 @@ pub enum Error {
FilePathError(#[from] FilePathError),
#[error(transparent)]
SubPath(#[from] sub_path::Error),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
impl From<Error> for rspc::Error {

View file

@@ -60,6 +60,8 @@ pub enum Error {
NonUtf8Path(#[from] NonUtf8PathError),
#[error(transparent)]
IsoFilePath(#[from] FilePathError),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
#[error("missing field on database: {0}")]
MissingField(#[from] MissingFieldError),
#[error("failed to deserialized stored tasks for job resume: {0}")]

View file

@@ -1,6 +1,6 @@
use crate::{indexer, Error};
use sd_core_file_path_helper::IsolatedFilePathDataParts;
use sd_core_file_path_helper::{FilePathMetadata, IsolatedFilePathDataParts};
use sd_core_sync::Manager as SyncManager;
use sd_prisma::{
@@ -9,7 +9,10 @@ use sd_prisma::{
};
use sd_sync::{sync_db_entry, OperationFactory};
use sd_task_system::{ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId};
use sd_utils::{db::inode_to_db, msgpack};
use sd_utils::{
db::{inode_to_db, size_in_bytes_to_db},
msgpack,
};
use std::{sync::Arc, time::Duration};
@@ -92,7 +95,14 @@ impl Task<Error> for Saver {
pub_id,
maybe_object_id,
iso_file_path,
metadata,
metadata:
FilePathMetadata {
inode,
size_in_bytes,
created_at,
modified_at,
hidden,
},
}| {
let IsolatedFilePathDataParts {
materialized_path,
@@ -118,19 +128,16 @@
),
location_id::set(Some(*location_id)),
),
sync_db_entry!(materialized_path.to_string(), materialized_path),
sync_db_entry!(name.to_string(), name),
sync_db_entry!(materialized_path, materialized_path),
sync_db_entry!(name, name),
sync_db_entry!(is_dir, is_dir),
sync_db_entry!(extension.to_string(), extension),
sync_db_entry!(
metadata.size_in_bytes.to_be_bytes().to_vec(),
size_in_bytes_bytes
),
sync_db_entry!(inode_to_db(metadata.inode), inode),
sync_db_entry!(metadata.created_at.into(), date_created),
sync_db_entry!(metadata.modified_at.into(), date_modified),
sync_db_entry!(Utc::now().into(), date_indexed),
sync_db_entry!(metadata.hidden, hidden),
sync_db_entry!(extension, extension),
sync_db_entry!(size_in_bytes_to_db(size_in_bytes), size_in_bytes_bytes),
sync_db_entry!(inode_to_db(inode), inode),
sync_db_entry!(created_at, date_created),
sync_db_entry!(modified_at, date_modified),
sync_db_entry!(Utc::now(), date_indexed),
sync_db_entry!(hidden, hidden),
]
.into_iter()
.unzip();
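
`size_in_bytes_to_db` replaces the inlined `to_be_bytes().to_vec()` conversion that the old code used. Its definition is not part of this diff; assuming it simply mirrors the code it replaces, it would look like this:

// Hypothetical reconstruction based on the removed inline code:
// store a u64 file size as big-endian bytes for a SQLite BLOB column.
#[must_use]
pub fn size_in_bytes_to_db(size: u64) -> Vec<u8> {
    size.to_be_bytes().to_vec()
}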

View file

@@ -1,6 +1,6 @@
use crate::{indexer, Error};
use sd_core_file_path_helper::IsolatedFilePathDataParts;
use sd_core_file_path_helper::{FilePathMetadata, IsolatedFilePathDataParts};
use sd_core_sync::Manager as SyncManager;
use sd_prisma::{
@@ -11,7 +11,11 @@ use sd_sync::{sync_db_entry, OperationFactory};
use sd_task_system::{
check_interruption, ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
use sd_utils::{chain_optional_iter, db::inode_to_db, msgpack};
use sd_utils::{
chain_optional_iter,
db::{inode_to_db, size_in_bytes_to_db},
msgpack,
};
use std::{collections::HashSet, sync::Arc, time::Duration};
@@ -96,7 +100,14 @@ impl Task<Error> for Updater {
pub_id,
maybe_object_id,
iso_file_path,
metadata,
metadata:
FilePathMetadata {
inode,
size_in_bytes,
created_at,
modified_at,
hidden,
},
}| {
let IsolatedFilePathDataParts { is_dir, .. } = &iso_file_path.to_parts();
@@ -108,20 +119,11 @@
[
((cas_id::NAME, msgpack!(nil)), cas_id::set(None)),
sync_db_entry!(*is_dir, is_dir),
sync_db_entry!(
metadata.size_in_bytes.to_be_bytes().to_vec(),
size_in_bytes_bytes
),
sync_db_entry!(inode_to_db(metadata.inode), inode),
{
let v = metadata.created_at.into();
sync_db_entry!(v, date_created)
},
{
let v = metadata.modified_at.into();
sync_db_entry!(v, date_modified)
},
sync_db_entry!(metadata.hidden, hidden),
sync_db_entry!(size_in_bytes_to_db(size_in_bytes), size_in_bytes_bytes),
sync_db_entry!(inode_to_db(inode), inode),
sync_db_entry!(created_at, date_created),
sync_db_entry!(modified_at, date_modified),
sync_db_entry!(hidden, hidden),
],
[
// As this file was updated while Spacedrive was offline, we mark the object_id and cas_id as null

View file

@@ -16,7 +16,6 @@ use std::path::Path;
use futures_concurrency::future::TryJoin;
use once_cell::sync::Lazy;
use prisma_client_rust::QueryError;
use super::from_slice_option_to_option;
@@ -107,7 +106,7 @@ pub async fn save(
exif_datas: impl IntoIterator<Item = (ExifMetadata, object::id::Type, ObjectPubId)> + Send,
db: &PrismaClient,
sync: &SyncManager,
) -> Result<u64, QueryError> {
) -> Result<u64, sd_core_sync::Error> {
exif_datas
.into_iter()
.map(|(exif_data, object_id, object_pub_id)| async move {

View file

@@ -110,7 +110,7 @@ pub async fn extract(
pub async fn save(
ffmpeg_datas: impl IntoIterator<Item = (FFmpegMetadata, object::id::Type)> + Send,
db: &PrismaClient,
) -> Result<u64, QueryError> {
) -> Result<u64, sd_core_sync::Error> {
ffmpeg_datas
.into_iter()
.map(

View file

@@ -55,6 +55,8 @@ pub enum Error {
FilePathError(#[from] FilePathError),
#[error(transparent)]
SubPath(#[from] sub_path::Error),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
impl From<Error> for rspc::Error {

View file

@@ -14,12 +14,21 @@ sd-utils = { path = "../../../crates/utils" }
sd-actors = { path = "../../../crates/actors" }
# Workspace dependencies
prisma-client-rust = { workspace = true }
async-channel = { workspace = true }
futures = { workspace = true }
futures-concurrency = { workspace = true }
prisma-client-rust = { workspace = true, features = ["rspc"] }
rmpv = { workspace = true }
rmp-serde = { workspace = true }
rspc = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
uhlc = { workspace = true }
uuid = { workspace = true }
[dev-dependencies]
tracing-test = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter"] }
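
The new dev-dependencies support this crate's integration tests, such as the `writes_operations_and_rows_together` test wired into the VS Code task earlier in this diff. A hedged sketch of how `tracing-test` is typically used (illustrative test body, not Spacedrive's):

#[cfg(test)]
mod tests {
    // #[traced_test] installs a capturing subscriber for the test and
    // brings a `logs_contain` helper into scope for log assertions.
    #[tracing_test::traced_test]
    #[tokio::test]
    async fn emits_expected_logs() {
        tracing::info!("writing operations and rows together");
        assert!(logs_contain("operations and rows"));
    }
}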

View file

@@ -1,16 +1,14 @@
use std::sync::Arc;
use tokio::sync::{mpsc, Mutex};
use async_channel as chan;
pub trait ActorTypes {
type Event;
type Request;
type Event: Send;
type Request: Send;
type Handler;
}
pub struct ActorIO<T: ActorTypes> {
pub event_rx: Arc<Mutex<mpsc::Receiver<T::Event>>>,
pub req_tx: mpsc::Sender<T::Request>,
pub event_rx: chan::Receiver<T::Event>,
pub req_tx: chan::Sender<T::Request>,
}
impl<T: ActorTypes> Clone for ActorIO<T> {
@@ -23,33 +21,19 @@ impl<T: ActorTypes> Clone for ActorIO<T> {
}
impl<T: ActorTypes> ActorIO<T> {
pub async fn send(&self, value: T::Request) -> Result<(), mpsc::error::SendError<T::Request>> {
pub async fn send(&self, value: T::Request) -> Result<(), chan::SendError<T::Request>> {
self.req_tx.send(value).await
}
}
pub struct HandlerIO<T: ActorTypes> {
pub event_tx: mpsc::Sender<T::Event>,
pub req_rx: mpsc::Receiver<T::Request>,
pub event_tx: chan::Sender<T::Event>,
pub req_rx: chan::Receiver<T::Request>,
}
pub fn create_actor_io<T: ActorTypes>() -> (ActorIO<T>, HandlerIO<T>) {
let (req_tx, req_rx) = mpsc::channel(20);
let (event_tx, event_rx) = mpsc::channel(20);
let event_rx = Arc::new(Mutex::new(event_rx));
let (req_tx, req_rx) = chan::bounded(32);
let (event_tx, event_rx) = chan::bounded(32);
(ActorIO { event_rx, req_tx }, HandlerIO { event_tx, req_rx })
}
#[macro_export]
macro_rules! wait {
($rx:expr, $pattern:pat $(=> $expr:expr)?) => {
loop {
match $rx.recv().await {
Some($pattern) => break $($expr)?,
_ => continue
}
}
};
}
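
The move from `tokio::sync::mpsc` to `async-channel` is what lets `event_rx` drop its `Arc<Mutex<…>>` wrapper: async-channel is MPMC, so receivers are cloneable while each message is still delivered to exactly one consumer. A standalone illustration (not Spacedrive code):

use async_channel as chan;

#[tokio::main]
async fn main() {
    let (tx, rx) = chan::bounded::<u32>(32);
    // Cloning the receiver is fine; no Mutex needed.
    let rx2 = rx.clone();

    tx.send(1).await.unwrap();
    tx.send(2).await.unwrap();

    // Each message goes to exactly one of the receivers.
    assert_eq!(rx.recv().await.unwrap(), 1);
    assert_eq!(rx2.recv().await.unwrap(), 2);
}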

View file

@@ -1,447 +1,74 @@
use std::future::Future;
use sd_prisma::{
prisma::{
crdt_operation, exif_data, file_path, label, label_on_object, location, object, tag,
tag_on_object, PrismaClient, SortOrder,
crdt_operation, exif_data, file_path, instance, label, label_on_object, location, object,
tag, tag_on_object, PrismaClient, SortOrder,
},
prisma_sync,
};
use sd_sync::{option_sync_entry, OperationFactory};
use sd_utils::{chain_optional_iter, msgpack};
use sd_sync::{option_sync_entry, sync_entry, OperationFactory};
use sd_utils::chain_optional_iter;
use crate::crdt_op_unchecked_db;
use std::future::Future;
/// Takes all the syncable data in the database and generates CRDTOperations for it.
use tokio::time::Instant;
use tracing::{debug, instrument};
use super::{crdt_op_unchecked_db, Error};
/// Takes all the syncable data in the database and generates [`CRDTOperations`] for it.
/// This is a requirement before the library can sync.
pub async fn backfill_operations(db: &PrismaClient, sync: &crate::Manager, instance_id: i32) {
let lock = sync.timestamp_lock.acquire().await;
pub async fn backfill_operations(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
let lock = sync.timestamp_lock.lock().await;
db._transaction()
.with_timeout(9999999999)
let res = db
._transaction()
.with_timeout(9_999_999_999)
.run(|db| async move {
println!("backfill started");
debug!("backfill started");
let start = Instant::now();
db.crdt_operation()
.delete_many(vec![crdt_operation::instance_id::equals(instance_id)])
.exec()
.await?;
paginate(
|cursor| {
db.tag()
.find_many(vec![tag::id::gt(cursor)])
.order_by(tag::id::order(SortOrder::Asc))
.exec()
},
|tag| tag.id,
|tags| {
db.crdt_operation()
.create_many(
tags.into_iter()
.flat_map(|t| {
sync.shared_create(
prisma_sync::tag::SyncId { pub_id: t.pub_id },
chain_optional_iter(
[],
[
t.name.map(|v| (tag::name::NAME, msgpack!(v))),
t.color.map(|v| (tag::color::NAME, msgpack!(v))),
t.date_created.map(|v| {
(tag::date_created::NAME, msgpack!(v))
}),
t.date_modified.map(|v| {
(tag::date_modified::NAME, msgpack!(v))
}),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate_tags(&db, sync, instance_id).await?;
paginate_locations(&db, sync, instance_id).await?;
paginate_objects(&db, sync, instance_id).await?;
paginate_exif_datas(&db, sync, instance_id).await?;
paginate_file_paths(&db, sync, instance_id).await?;
paginate_tags_on_objects(&db, sync, instance_id).await?;
paginate_labels(&db, sync, instance_id).await?;
paginate_labels_on_objects(&db, sync, instance_id).await?;
paginate(
|cursor| {
db.location()
.find_many(vec![location::id::gt(cursor)])
.order_by(location::id::order(SortOrder::Asc))
.take(1000)
.include(location::include!({
instance: select {
id
pub_id
}
}))
.exec()
},
|location| location.id,
|locations| {
db.crdt_operation()
.create_many(
locations
.into_iter()
.flat_map(|l| {
use location::*;
debug!(elapsed = ?start.elapsed(), "backfill ended");
sync.shared_create(
prisma_sync::location::SyncId { pub_id: l.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(l.name, name),
option_sync_entry!(l.path, path),
option_sync_entry!(
l.total_capacity,
total_capacity
),
option_sync_entry!(
l.available_capacity,
available_capacity
),
option_sync_entry!(l.size_in_bytes, size_in_bytes),
option_sync_entry!(l.is_archived, is_archived),
option_sync_entry!(
l.generate_preview_media,
generate_preview_media
),
option_sync_entry!(
l.sync_preview_media,
sync_preview_media
),
option_sync_entry!(l.hidden, hidden),
option_sync_entry!(l.date_created, date_created),
option_sync_entry!(
l.instance.map(|i| {
prisma_sync::instance::SyncId {
pub_id: i.pub_id,
}
}),
instance
),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate(
|cursor| {
db.object()
.find_many(vec![object::id::gt(cursor)])
.order_by(object::id::order(SortOrder::Asc))
.take(1000)
.exec()
},
|object| object.id,
|objects| {
db.crdt_operation()
.create_many(
objects
.into_iter()
.flat_map(|o| {
use object::*;
sync.shared_create(
prisma_sync::object::SyncId { pub_id: o.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(o.kind, kind),
option_sync_entry!(o.hidden, hidden),
option_sync_entry!(o.favorite, favorite),
option_sync_entry!(o.important, important),
option_sync_entry!(o.note, note),
option_sync_entry!(o.date_created, date_created),
option_sync_entry!(o.date_accessed, date_accessed),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate(
|cursor| {
db.exif_data()
.find_many(vec![exif_data::id::gt(cursor)])
.order_by(exif_data::id::order(SortOrder::Asc))
.take(1000)
.include(exif_data::include!({
object: select { pub_id }
}))
.exec()
},
|o| o.id,
|media_datas| {
db.crdt_operation()
.create_many(
media_datas
.into_iter()
.flat_map(|md| {
use exif_data::*;
sync.shared_create(
prisma_sync::exif_data::SyncId {
object: prisma_sync::object::SyncId {
pub_id: md.object.pub_id,
},
},
chain_optional_iter(
[],
[
option_sync_entry!(md.resolution, resolution),
option_sync_entry!(md.media_date, media_date),
option_sync_entry!(
md.media_location,
media_location
),
option_sync_entry!(md.camera_data, camera_data),
option_sync_entry!(md.artist, artist),
option_sync_entry!(md.description, description),
option_sync_entry!(md.copyright, copyright),
option_sync_entry!(md.exif_version, exif_version),
option_sync_entry!(md.epoch_time, epoch_time),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate(
|cursor| {
db.file_path()
.find_many(vec![file_path::id::gt(cursor)])
.order_by(file_path::id::order(SortOrder::Asc))
.include(file_path::include!({
location: select { pub_id }
object: select { pub_id }
}))
.exec()
},
|o| o.id,
|file_paths| {
db.crdt_operation()
.create_many(
file_paths
.into_iter()
.flat_map(|fp| {
use file_path::*;
sync.shared_create(
prisma_sync::file_path::SyncId { pub_id: fp.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(fp.is_dir, is_dir),
option_sync_entry!(fp.cas_id, cas_id),
option_sync_entry!(
fp.integrity_checksum,
integrity_checksum
),
option_sync_entry!(
fp.location.map(|l| {
prisma_sync::location::SyncId {
pub_id: l.pub_id,
}
}),
location
),
option_sync_entry!(
fp.object.map(|o| {
prisma_sync::object::SyncId {
pub_id: o.pub_id,
}
}),
object
),
option_sync_entry!(
fp.materialized_path,
materialized_path
),
option_sync_entry!(fp.name, name),
option_sync_entry!(fp.extension, extension),
option_sync_entry!(fp.hidden, hidden),
option_sync_entry!(
fp.size_in_bytes_bytes,
size_in_bytes_bytes
),
option_sync_entry!(fp.inode, inode),
option_sync_entry!(fp.date_created, date_created),
option_sync_entry!(fp.date_modified, date_modified),
option_sync_entry!(fp.date_indexed, date_indexed),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate_relation(
|group_id, item_id| {
db.tag_on_object()
.find_many(vec![
tag_on_object::tag_id::gt(group_id),
tag_on_object::object_id::gt(item_id),
])
.order_by(tag_on_object::tag_id::order(SortOrder::Asc))
.order_by(tag_on_object::object_id::order(SortOrder::Asc))
.include(tag_on_object::include!({
tag: select { pub_id }
object: select { pub_id }
}))
.exec()
},
|t_o| (t_o.tag_id, t_o.object_id),
|tag_on_objects| {
db.crdt_operation()
.create_many(
tag_on_objects
.into_iter()
.flat_map(|t_o| {
sync.relation_create(
prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: t_o.tag.pub_id,
},
object: prisma_sync::object::SyncId {
pub_id: t_o.object.pub_id,
},
},
chain_optional_iter(
[],
[option_sync_entry!(
t_o.date_created,
tag_on_object::date_created
)],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
paginate(
|cursor| {
db.label()
.find_many(vec![label::id::gt(cursor)])
.order_by(label::id::order(SortOrder::Asc))
.exec()
},
|label| label.id,
|labels| {
db.crdt_operation()
.create_many(
labels
.into_iter()
.flat_map(|l| {
sync.shared_create(
prisma_sync::label::SyncId { name: l.name },
[
(label::date_created::NAME, msgpack!(l.date_created)),
(label::date_modified::NAME, msgpack!(l.date_modified)),
],
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await?;
let res = paginate_relation(
|group_id, item_id| {
db.label_on_object()
.find_many(vec![
label_on_object::label_id::gt(group_id),
label_on_object::object_id::gt(item_id),
])
.order_by(label_on_object::label_id::order(SortOrder::Asc))
.order_by(label_on_object::object_id::order(SortOrder::Asc))
.include(label_on_object::include!({
object: select { pub_id }
label: select { name }
}))
.exec()
},
|l_o| (l_o.label_id, l_o.object_id),
|label_on_objects| {
db.crdt_operation()
.create_many(
label_on_objects
.into_iter()
.flat_map(|l_o| {
sync.relation_create(
prisma_sync::label_on_object::SyncId {
label: prisma_sync::label::SyncId {
name: l_o.label.name,
},
object: prisma_sync::object::SyncId {
pub_id: l_o.object.pub_id,
},
},
[],
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect(),
)
.exec()
},
)
.await;
println!("backfill ended");
res
Ok(())
})
.await
.unwrap();
.await;
drop(lock);
res
}
async fn paginate<
T,
E: std::fmt::Debug,
TGetter: Future<Output = Result<Vec<T>, E>>,
TOperations: Future<Output = Result<i64, E>>,
>(
getter: impl Fn(i32) -> TGetter,
id: impl Fn(&T) -> i32,
operations: impl Fn(Vec<T>) -> TOperations,
) -> Result<(), E> {
async fn paginate<T, E1, E2, E3, GetterFut, OperationsFut>(
getter: impl Fn(i32) -> GetterFut + Send,
id: impl Fn(&T) -> i32 + Send,
operations: impl Fn(Vec<T>) -> Result<OperationsFut, E3> + Send,
) -> Result<(), Error>
where
T: Send,
E1: Send,
E2: Send,
E3: Send,
Error: From<E1> + From<E2> + From<E3> + Send,
GetterFut: Future<Output = Result<Vec<T>, E1>> + Send,
OperationsFut: Future<Output = Result<i64, E2>> + Send,
{
let mut next_cursor = Some(-1);
loop {
let Some(cursor) = next_cursor else {
@@ -450,22 +77,26 @@ async fn paginate<
let items = getter(cursor).await?;
next_cursor = items.last().map(&id);
operations(items).await?;
operations(items)?.await?;
}
Ok(())
}
async fn paginate_relation<
T,
E: std::fmt::Debug,
TGetter: Future<Output = Result<Vec<T>, E>>,
TOperations: Future<Output = Result<i64, E>>,
>(
getter: impl Fn(i32, i32) -> TGetter,
id: impl Fn(&T) -> (i32, i32),
operations: impl Fn(Vec<T>) -> TOperations,
) -> Result<(), E> {
async fn paginate_relation<T, E1, E2, E3, GetterFut, OperationsFut>(
getter: impl Fn(i32, i32) -> GetterFut + Send,
id: impl Fn(&T) -> (i32, i32) + Send,
operations: impl Fn(Vec<T>) -> Result<OperationsFut, E3> + Send,
) -> Result<(), Error>
where
T: Send,
E1: Send,
E2: Send,
E3: Send,
Error: From<E1> + From<E2> + From<E3> + Send,
GetterFut: Future<Output = Result<Vec<T>, E1>> + Send,
OperationsFut: Future<Output = Result<i64, E2>> + Send,
{
let mut next_cursor = Some((-1, -1));
loop {
let Some(cursor) = next_cursor else {
@@ -474,8 +105,416 @@ async fn paginate_relation<
let items = getter(cursor.0, cursor.1).await?;
next_cursor = items.last().map(&id);
operations(items).await?;
operations(items)?.await?;
}
Ok(())
}
#[instrument(skip(db, sync), err)]
async fn paginate_tags(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use tag::{color, date_created, date_modified, id, name};
paginate(
|cursor| {
db.tag()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.exec()
},
|tag| tag.id,
|tags| {
tags.into_iter()
.flat_map(|t| {
sync.shared_create(
prisma_sync::tag::SyncId { pub_id: t.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(t.name, name),
option_sync_entry!(t.color, color),
option_sync_entry!(t.date_created, date_created),
option_sync_entry!(t.date_modified, date_modified),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_locations(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use location::{
available_capacity, date_created, generate_preview_media, hidden, id, include, instance,
is_archived, name, path, size_in_bytes, sync_preview_media, total_capacity,
};
paginate(
|cursor| {
db.location()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.take(1000)
.include(include!({
instance: select {
id
pub_id
}
}))
.exec()
},
|location| location.id,
|locations| {
locations
.into_iter()
.flat_map(|l| {
sync.shared_create(
prisma_sync::location::SyncId { pub_id: l.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(l.name, name),
option_sync_entry!(l.path, path),
option_sync_entry!(l.total_capacity, total_capacity),
option_sync_entry!(l.available_capacity, available_capacity),
option_sync_entry!(l.size_in_bytes, size_in_bytes),
option_sync_entry!(l.is_archived, is_archived),
option_sync_entry!(
l.generate_preview_media,
generate_preview_media
),
option_sync_entry!(l.sync_preview_media, sync_preview_media),
option_sync_entry!(l.hidden, hidden),
option_sync_entry!(l.date_created, date_created),
option_sync_entry!(
l.instance.map(|i| {
prisma_sync::instance::SyncId { pub_id: i.pub_id }
}),
instance
),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_objects(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use object::{date_accessed, date_created, favorite, hidden, id, important, kind, note};
paginate(
|cursor| {
db.object()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.take(1000)
.exec()
},
|object| object.id,
|objects| {
objects
.into_iter()
.flat_map(|o| {
sync.shared_create(
prisma_sync::object::SyncId { pub_id: o.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(o.kind, kind),
option_sync_entry!(o.hidden, hidden),
option_sync_entry!(o.favorite, favorite),
option_sync_entry!(o.important, important),
option_sync_entry!(o.note, note),
option_sync_entry!(o.date_created, date_created),
option_sync_entry!(o.date_accessed, date_accessed),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_exif_datas(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use exif_data::{
artist, camera_data, copyright, description, epoch_time, exif_version, id, include,
media_date, media_location, resolution,
};
paginate(
|cursor| {
db.exif_data()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.take(1000)
.include(include!({
object: select { pub_id }
}))
.exec()
},
|ed| ed.id,
|exif_datas| {
exif_datas
.into_iter()
.flat_map(|ed| {
sync.shared_create(
prisma_sync::exif_data::SyncId {
object: prisma_sync::object::SyncId {
pub_id: ed.object.pub_id,
},
},
chain_optional_iter(
[],
[
option_sync_entry!(ed.resolution, resolution),
option_sync_entry!(ed.media_date, media_date),
option_sync_entry!(ed.media_location, media_location),
option_sync_entry!(ed.camera_data, camera_data),
option_sync_entry!(ed.artist, artist),
option_sync_entry!(ed.description, description),
option_sync_entry!(ed.copyright, copyright),
option_sync_entry!(ed.exif_version, exif_version),
option_sync_entry!(ed.epoch_time, epoch_time),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_file_paths(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use file_path::{
cas_id, date_created, date_indexed, date_modified, extension, hidden, id, include, inode,
integrity_checksum, is_dir, location, materialized_path, name, object, size_in_bytes_bytes,
};
paginate(
|cursor| {
db.file_path()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.include(include!({
location: select { pub_id }
object: select { pub_id }
}))
.exec()
},
|o| o.id,
|file_paths| {
file_paths
.into_iter()
.flat_map(|fp| {
sync.shared_create(
prisma_sync::file_path::SyncId { pub_id: fp.pub_id },
chain_optional_iter(
[],
[
option_sync_entry!(fp.is_dir, is_dir),
option_sync_entry!(fp.cas_id, cas_id),
option_sync_entry!(fp.integrity_checksum, integrity_checksum),
option_sync_entry!(
fp.location.map(|l| {
prisma_sync::location::SyncId { pub_id: l.pub_id }
}),
location
),
option_sync_entry!(
fp.object.map(|o| {
prisma_sync::object::SyncId { pub_id: o.pub_id }
}),
object
),
option_sync_entry!(fp.materialized_path, materialized_path),
option_sync_entry!(fp.name, name),
option_sync_entry!(fp.extension, extension),
option_sync_entry!(fp.hidden, hidden),
option_sync_entry!(fp.size_in_bytes_bytes, size_in_bytes_bytes),
option_sync_entry!(fp.inode, inode),
option_sync_entry!(fp.date_created, date_created),
option_sync_entry!(fp.date_modified, date_modified),
option_sync_entry!(fp.date_indexed, date_indexed),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_tags_on_objects(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use tag_on_object::{date_created, include, object_id, tag_id};
paginate_relation(
|group_id, item_id| {
db.tag_on_object()
.find_many(vec![tag_id::gt(group_id), object_id::gt(item_id)])
.order_by(tag_id::order(SortOrder::Asc))
.order_by(object_id::order(SortOrder::Asc))
.include(include!({
tag: select { pub_id }
object: select { pub_id }
}))
.exec()
},
|t_o| (t_o.tag_id, t_o.object_id),
|tag_on_objects| {
tag_on_objects
.into_iter()
.flat_map(|t_o| {
sync.relation_create(
prisma_sync::tag_on_object::SyncId {
tag: prisma_sync::tag::SyncId {
pub_id: t_o.tag.pub_id,
},
object: prisma_sync::object::SyncId {
pub_id: t_o.object.pub_id,
},
},
chain_optional_iter(
[],
[option_sync_entry!(t_o.date_created, date_created)],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_labels(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use label::{date_created, date_modified, id};
paginate(
|cursor| {
db.label()
.find_many(vec![id::gt(cursor)])
.order_by(id::order(SortOrder::Asc))
.exec()
},
|label| label.id,
|labels| {
labels
.into_iter()
.flat_map(|l| {
sync.shared_create(
prisma_sync::label::SyncId { name: l.name },
chain_optional_iter(
[],
[
option_sync_entry!(l.date_created, date_created),
option_sync_entry!(l.date_modified, date_modified),
],
),
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
#[instrument(skip(db, sync), err)]
async fn paginate_labels_on_objects(
db: &PrismaClient,
sync: &crate::Manager,
instance_id: instance::id::Type,
) -> Result<(), Error> {
use label_on_object::{date_created, include, label_id, object_id};
paginate_relation(
|group_id, item_id| {
db.label_on_object()
.find_many(vec![label_id::gt(group_id), object_id::gt(item_id)])
.order_by(label_id::order(SortOrder::Asc))
.order_by(object_id::order(SortOrder::Asc))
.include(include!({
object: select { pub_id }
label: select { name }
}))
.exec()
},
|l_o| (l_o.label_id, l_o.object_id),
|label_on_objects| {
label_on_objects
.into_iter()
.flat_map(|l_o| {
sync.relation_create(
prisma_sync::label_on_object::SyncId {
label: prisma_sync::label::SyncId {
name: l_o.label.name,
},
object: prisma_sync::object::SyncId {
pub_id: l_o.object.pub_id,
},
},
[sync_entry!(l_o.date_created, date_created)],
)
})
.map(|o| crdt_op_unchecked_db(&o, instance_id))
.collect::<Result<Vec<_>, _>>()
.map(|creates| db.crdt_operation().create_many(creates).exec())
},
)
.await
}
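
For intuition, the cursor scheme shared by `paginate` and `paginate_relation` reduces to this standalone toy: fetch rows beyond the cursor, advance the cursor to the last id seen, and stop once a page comes back empty.

fn fetch_page(data: &[i32], cursor: i32, page_size: usize) -> Vec<i32> {
    data.iter()
        .copied()
        .filter(|&id| id > cursor)
        .take(page_size)
        .collect()
}

fn main() {
    let data: Vec<i32> = (1..=10).collect();
    let mut next_cursor = Some(-1); // same sentinel the real code uses
    loop {
        let Some(cursor) = next_cursor else { break };
        let items = fetch_page(&data, cursor, 4);
        next_cursor = items.last().copied(); // None once a page is empty
        println!("page: {items:?}"); // [1..=4], then [5..=8], then [9, 10], then []
    }
}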

View file

@@ -1,77 +1,102 @@
use rmp_serde::to_vec;
use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, instance, PrismaClient};
use sd_sync::CRDTOperation;
use sd_utils::from_bytes_to_uuid;
use tracing::instrument;
use uhlc::NTP64;
use uuid::Uuid;
crdt_operation::include!(crdt_include {
use super::Error;
crdt_operation::include!(crdt_with_instance {
instance: select { pub_id }
});
cloud_crdt_operation::include!(cloud_crdt_include {
cloud_crdt_operation::include!(cloud_crdt_with_instance {
instance: select { pub_id }
});
impl crdt_include::Data {
pub fn timestamp(&self) -> NTP64 {
impl crdt_with_instance::Data {
#[allow(clippy::cast_sign_loss)] // SAFETY: we had to store using i64 due to SQLite limitations
pub const fn timestamp(&self) -> NTP64 {
NTP64(self.timestamp as u64)
}
pub fn instance(&self) -> Uuid {
Uuid::from_slice(&self.instance.pub_id).unwrap()
from_bytes_to_uuid(&self.instance.pub_id)
}
pub fn into_operation(self) -> CRDTOperation {
CRDTOperation {
pub fn into_operation(self) -> Result<CRDTOperation, Error> {
Ok(CRDTOperation {
instance: self.instance(),
timestamp: self.timestamp(),
record_id: rmp_serde::from_slice(&self.record_id).unwrap(),
model: self.model as u16,
data: rmp_serde::from_slice(&self.data).unwrap(),
}
record_id: rmp_serde::from_slice(&self.record_id)?,
model: {
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
// SAFETY: we will not have more than 2^16 models and we had to store using signed
// integers due to SQLite limitations
{
self.model as u16
}
},
data: rmp_serde::from_slice(&self.data)?,
})
}
}
impl cloud_crdt_include::Data {
pub fn timestamp(&self) -> NTP64 {
impl cloud_crdt_with_instance::Data {
#[allow(clippy::cast_sign_loss)] // SAFETY: we had to store using i64 due to SQLite limitations
pub const fn timestamp(&self) -> NTP64 {
NTP64(self.timestamp as u64)
}
pub fn instance(&self) -> Uuid {
Uuid::from_slice(&self.instance.pub_id).unwrap()
from_bytes_to_uuid(&self.instance.pub_id)
}
pub fn into_operation(self) -> (i32, CRDTOperation) {
(
#[instrument(skip(self), err)]
pub fn into_operation(self) -> Result<(i32, CRDTOperation), Error> {
Ok((
self.id,
CRDTOperation {
instance: self.instance(),
timestamp: self.timestamp(),
record_id: rmp_serde::from_slice(&self.record_id).unwrap(),
model: self.model as u16,
data: serde_json::from_slice(&self.data).unwrap(),
record_id: rmp_serde::from_slice(&self.record_id)?,
model: {
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
// SAFETY: we will not have more than 2^16 models and we had to store using signed
// integers due to SQLite limitations
{
self.model as u16
}
},
data: rmp_serde::from_slice(&self.data)?,
},
)
))
}
}
pub async fn write_crdt_op_to_db(
op: &CRDTOperation,
db: &PrismaClient,
) -> Result<(), prisma_client_rust::QueryError> {
crdt_op_db(op).to_query(db).exec().await?;
Ok(())
}
fn crdt_op_db(op: &CRDTOperation) -> crdt_operation::Create {
#[instrument(skip(op, db), err)]
pub async fn write_crdt_op_to_db(op: &CRDTOperation, db: &PrismaClient) -> Result<(), Error> {
crdt_operation::Create {
timestamp: op.timestamp.0 as i64,
timestamp: {
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we have to store using i64 due to SQLite limitations
{
op.timestamp.0 as i64
}
},
instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()),
kind: op.kind().to_string(),
data: to_vec(&op.data).unwrap(),
model: op.model as i32,
record_id: rmp_serde::to_vec(&op.record_id).unwrap(),
data: rmp_serde::to_vec(&op.data)?,
model: i32::from(op.model),
record_id: rmp_serde::to_vec(&op.record_id)?,
_params: vec![],
}
.to_query(db)
.select(crdt_operation::select!({ id })) // To don't fetch the whole object for nothing
.exec()
.await
.map_or_else(|e| Err(e.into()), |_| Ok(()))
}
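
The `SAFETY` comments above rely on `as` casts between `u64` and `i64` preserving the bit pattern, so an NTP64 timestamp survives the round trip through SQLite's signed INTEGER column:

fn main() {
    let ts: u64 = u64::MAX - 42; // large enough to go negative as i64
    let stored = ts as i64;      // what lands in the database
    let restored = stored as u64;
    assert_eq!(ts, restored);    // lossless round trip
    println!("{ts} stored as {stored}");
}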

View file

@@ -1,27 +1,38 @@
use std::{
collections::BTreeMap,
num::NonZeroU128,
ops::Deref,
sync::{atomic::Ordering, Arc},
};
use sd_prisma::{
prisma::{crdt_operation, SortOrder},
prisma::{crdt_operation, PrismaClient, SortOrder},
prisma_sync::ModelSyncData,
};
use sd_sync::{
CRDTOperation, CRDTOperationData, CompressedCRDTOperation, CompressedCRDTOperations,
OperationKind,
};
use tokio::sync::{mpsc, oneshot, Mutex};
use tracing::debug;
use std::{
collections::BTreeMap,
future::IntoFuture,
num::NonZeroU128,
ops::Deref,
pin::pin,
sync::{atomic::Ordering, Arc},
time::SystemTime,
};
use async_channel as chan;
use futures::{stream, FutureExt, StreamExt};
use futures_concurrency::{
future::{Race, TryJoin},
stream::Merge,
};
use prisma_client_rust::chrono::{DateTime, Utc};
use tokio::sync::oneshot;
use tracing::{debug, error, instrument, trace, warn};
use uhlc::{Timestamp, NTP64};
use uuid::Uuid;
use crate::{
actor::{create_actor_io, ActorIO, ActorTypes},
use super::{
actor::{create_actor_io, ActorIO, ActorTypes, HandlerIO},
db_operation::write_crdt_op_to_db,
wait, SharedState,
Error, SharedState,
};
#[derive(Debug)]
@@ -32,7 +43,6 @@ pub enum Request {
timestamps: Vec<(Uuid, NTP64)>,
tx: oneshot::Sender<()>,
},
// Ingested,
FinishedIngesting,
}
@@ -53,7 +63,7 @@ pub enum State {
/// The single entrypoint for sync operation ingestion.
/// Requests sync operations in a given timestamp range,
/// and attempts to write them to the syn coperations table along with
/// and attempts to write them to the sync operations table along with
/// the actual cell that the operation points to.
///
/// If this actor stops running, no sync operations will
@@ -66,133 +76,203 @@ pub struct Actor {
}
impl Actor {
async fn tick(mut self) -> Option<Self> {
let state = match self.state.take()? {
State::WaitingForNotification => {
self.shared.active.store(false, Ordering::Relaxed);
self.shared.active_notify.notify_waiters();
wait!(self.io.event_rx.lock().await, Event::Notification);
self.shared.active.store(true, Ordering::Relaxed);
self.shared.active_notify.notify_waiters();
State::RetrievingMessages
}
State::RetrievingMessages => {
let (tx, mut rx) = oneshot::channel::<()>();
let timestamps = self
.timestamps
.read()
.await
.iter()
.map(|(&k, &v)| (k, v))
.collect();
self.io
.send(Request::Messages { timestamps, tx })
.await
.ok();
let mut event_rx = self.io.event_rx.lock().await;
loop {
tokio::select! {
biased;
res = event_rx.recv() => {
if let Some(Event::Messages(event)) = res { break State::Ingesting(event) }
}
res = &mut rx => {
if res.is_err() {
debug!("messages request ignored");
break State::WaitingForNotification
}
},
}
}
}
State::Ingesting(event) => {
debug!(
messages_count = event.messages.len(),
first_message = event.messages.first().unwrap().3.timestamp.as_u64(),
last_message = event.messages.last().unwrap().3.timestamp.as_u64(),
"Ingesting operations;",
);
for (instance, data) in event.messages.0 {
for (model, data) in data {
for (record, ops) in data {
self.receive_crdt_operations(instance, model, record, ops)
.await
.expect("sync ingest failed");
}
}
}
if let Some(tx) = event.wait_tx {
tx.send(()).ok();
}
match event.has_more {
true => State::RetrievingMessages,
false => {
self.io.send(Request::FinishedIngesting).await.ok();
State::WaitingForNotification
}
}
}
#[instrument(skip(self), fields(old_state = ?self.state))]
async fn tick(&mut self) {
let state = match self
.state
.take()
.expect("ingest actor in inconsistent state")
{
State::WaitingForNotification => self.waiting_for_notification_state_transition().await,
State::RetrievingMessages => self.retrieving_messages_state_transition().await,
State::Ingesting(event) => self.ingesting_state_transition(event).await,
};
Some(Self {
state: Some(state),
..self
})
trace!(?state, "Actor state transitioned;");
self.state = Some(state);
}
async fn waiting_for_notification_state_transition(&self) -> State {
self.shared.active.store(false, Ordering::Relaxed);
self.shared.active_notify.notify_waiters();
loop {
match self
.io
.event_rx
.recv()
.await
.expect("sync actor receiver unexpectedly closed")
{
Event::Notification => {
trace!("Received notification");
break;
}
Event::Messages(event) => {
trace!(
?event,
"Ignored event message as we're waiting for a `Event::Notification`"
);
}
}
}
self.shared.active.store(true, Ordering::Relaxed);
self.shared.active_notify.notify_waiters();
State::RetrievingMessages
}
async fn retrieving_messages_state_transition(&self) -> State {
enum StreamMessage {
NewEvent(Event),
AckedRequest(Result<(), oneshot::error::RecvError>),
}
let (tx, rx) = oneshot::channel::<()>();
let timestamps = self
.timestamps
.read()
.await
.iter()
.map(|(&uid, &timestamp)| (uid, timestamp))
.collect();
if self
.io
.send(Request::Messages { timestamps, tx })
.await
.is_err()
{
warn!("Failed to send messages request");
}
let mut msg_stream = pin!((
self.io.event_rx.clone().map(StreamMessage::NewEvent),
stream::once(rx.map(StreamMessage::AckedRequest)),
)
.merge());
loop {
if let Some(msg) = msg_stream.next().await {
match msg {
StreamMessage::NewEvent(event) => {
if let Event::Messages(messages) = event {
trace!(?messages, "Received messages;");
break State::Ingesting(messages);
}
}
StreamMessage::AckedRequest(res) => {
if res.is_err() {
debug!("messages request ignored");
break State::WaitingForNotification;
}
}
}
} else {
break State::WaitingForNotification;
}
}
}
async fn ingesting_state_transition(&mut self, event: MessagesEvent) -> State {
debug!(
messages_count = event.messages.len(),
first_message = ?DateTime::<Utc>::from(
event.messages
.first()
.map_or(SystemTime::UNIX_EPOCH, |m| m.3.timestamp.to_system_time())
),
last_message = ?DateTime::<Utc>::from(
event.messages
.last()
.map_or(SystemTime::UNIX_EPOCH, |m| m.3.timestamp.to_system_time())
),
"Ingesting operations;",
);
for (instance, data) in event.messages.0 {
for (model, data) in data {
for (record, ops) in data {
if let Err(e) = self
.process_crdt_operations(instance, model, record, ops)
.await
{
error!(?e, "Failed to ingest CRDT operations;");
}
}
}
}
if let Some(tx) = event.wait_tx {
if tx.send(()).is_err() {
warn!("Failed to send wait_tx signal");
}
}
if event.has_more {
State::RetrievingMessages
} else {
{
if self.io.send(Request::FinishedIngesting).await.is_err() {
error!("Failed to send finished ingesting request");
}
State::WaitingForNotification
}
}
}
pub async fn declare(shared: Arc<SharedState>) -> Handler {
let (actor_io, handler_io) = create_actor_io::<Self>();
let (io, HandlerIO { event_tx, req_rx }) = create_actor_io::<Self>();
shared
.actors
.declare(
"Sync Ingest",
{
let shared = shared.clone();
move || async move {
let shared = Arc::clone(&shared);
move |stop| async move {
enum Race {
Ticked,
Stopped,
}
let mut this = Self {
state: Some(Default::default()),
io: actor_io,
state: Some(State::default()),
io,
shared,
};
loop {
this = match this.tick().await {
Some(this) => this,
None => break,
};
}
while matches!(
(
this.tick().map(|()| Race::Ticked),
stop.into_future().map(|()| Race::Stopped),
)
.race()
.await,
Race::Ticked
) { /* Everything is Awesome! */ }
}
},
true,
)
.await;
Handler {
event_tx: handler_io.event_tx,
req_rx: Arc::new(Mutex::new(handler_io.req_rx)),
}
Handler { event_tx, req_rx }
}
// where the magic happens
async fn receive_crdt_operations(
#[instrument(skip(self, ops), fields(operations_count = %ops.len()), err)]
async fn process_crdt_operations(
&mut self,
instance: Uuid,
model: u16,
record_id: rmpv::Value,
mut ops: Vec<CompressedCRDTOperation>,
) -> prisma_client_rust::Result<()> {
) -> Result<(), Error> {
let db = &self.db;
ops.sort_by_key(|op| op.timestamp);
@@ -209,52 +289,31 @@
.expect("timestamp has too much drift!");
// read the timestamp for the operation's instance, or insert one if it doesn't exist
let timestamp = self.timestamps.read().await.get(&instance).cloned();
let timestamp = self.timestamps.read().await.get(&instance).copied();
// Delete - ignores all other messages
if let Some(delete_op) = ops
.iter()
.rev()
.find(|op| matches!(op.data, sd_sync::CRDTOperationData::Delete))
.find(|op| matches!(op.data, CRDTOperationData::Delete))
{
// deletes are the be all and end all, no need to check anything
let op = CRDTOperation {
instance,
model,
record_id,
timestamp: delete_op.timestamp,
data: CRDTOperationData::Delete,
};
self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
ModelSyncData::from_op(op.clone())
.unwrap()
.exec(&db)
.await?;
write_crdt_op_to_db(&op, &db).await?;
Ok(())
})
.await?;
trace!("Deleting operation");
handle_crdt_deletion(db, instance, model, record_id, delete_op).await?;
}
// Create + > 0 Update - overwrites the create's data with the updates
else if let Some(timestamp) = ops.iter().rev().find_map(|op| {
if let sd_sync::CRDTOperationData::Create(_) = &op.data {
return Some(op.timestamp);
}
else if let Some(timestamp) = ops
.iter()
.rev()
.find_map(|op| matches!(&op.data, CRDTOperationData::Create(_)).then_some(op.timestamp))
{
trace!("Create + Updates operations");
None
}) {
// conflict resolution
let delete = db
.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id).unwrap()),
crdt_operation::model::equals(i32::from(model)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Delete.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
@@ -262,73 +321,16 @@
.await?;
if delete.is_some() {
debug!("Found a previous delete operation with the same SyncId, will ignore these operations");
return Ok(());
}
let mut data = BTreeMap::new();
let mut applied_ops = vec![];
// search for all Updates until a Create is found
for op in ops.iter().rev() {
match &op.data {
CRDTOperationData::Delete => unreachable!("Delete can't exist here!"),
CRDTOperationData::Create(create_data) => {
for (k, v) in create_data {
data.entry(k).or_insert(v);
}
applied_ops.push(op);
break;
}
CRDTOperationData::Update { field, value } => {
applied_ops.push(op);
data.insert(field, value);
}
}
}
self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake a create with a bunch of data rather than individual insert
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Create(
data.into_iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect(),
),
})
.unwrap()
.exec(&db)
.await?;
for op in applied_ops {
write_crdt_op_to_db(
&CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp: op.timestamp,
data: op.data.clone(),
},
&db,
)
.await?;
}
Ok(())
})
.await?;
handle_crdt_create_and_updates(db, instance, model, record_id, ops, timestamp).await?;
}
// > 0 Update - batches updates with a fake Create op
else {
trace!("Updates operation");
let mut data = BTreeMap::new();
for op in ops.into_iter().rev() {
@@ -344,84 +346,43 @@ impl Actor {
._batch((
db.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&record_id).unwrap(),
),
crdt_operation::model::equals(i32::from(model)),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id)?),
crdt_operation::kind::equals(OperationKind::Create.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)),
data.iter()
.map(|(k, (_, timestamp))| {
db.crdt_operation()
Ok(db
.crdt_operation()
.find_first(vec![
crdt_operation::timestamp::gt(timestamp.as_u64() as i64),
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&record_id).unwrap(),
),
crdt_operation::timestamp::gt({
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
timestamp.as_u64() as i64
}
}),
crdt_operation::model::equals(i32::from(model)),
crdt_operation::record_id::equals(rmp_serde::to_vec(
&record_id,
)?),
crdt_operation::kind::equals(
OperationKind::Update(k).to_string(),
),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)))
})
.collect::<Vec<_>>(),
.collect::<Result<Vec<_>, Error>>()?,
))
.await?;
if create.is_none() {
warn!("Failed to find a previous create operation with the same SyncId");
return Ok(());
}
// does the same thing as processing ops one-by-one and returning early if a newer op was found
for (update, key) in updates
.into_iter()
.zip(data.keys().cloned().collect::<Vec<_>>())
{
if update.is_some() {
data.remove(&key);
}
}
self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake operation to batch them all at once
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp: NTP64(0),
data: CRDTOperationData::Create(
data.iter()
.map(|(k, (data, _))| (k.to_string(), data.clone()))
.collect(),
),
})
.unwrap()
.exec(&db)
.await?;
// need to only apply ops that haven't been filtered out
for (field, (value, timestamp)) in data {
write_crdt_op_to_db(
&CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Update { field, value },
},
&db,
)
.await?;
}
Ok(())
})
.await?;
handle_crdt_updates(db, instance, model, record_id, data, updates).await?;
}
// update the stored timestamp for this instance - will be derived from the crdt operations table on restart
@@ -429,12 +390,179 @@ impl Actor {
self.timestamps.write().await.insert(instance, new_ts);
// self.io.req_tx.send(Request::Ingested).await.ok();
Ok(())
}
}
async fn handle_crdt_updates(
db: &PrismaClient,
instance: Uuid,
model: u16,
record_id: rmpv::Value,
mut data: BTreeMap<String, (rmpv::Value, NTP64)>,
updates: Vec<Option<crdt_operation::Data>>,
) -> Result<(), Error> {
let keys = data.keys().cloned().collect::<Vec<_>>();
// does the same thing as processing ops one-by-one and returning early if a newer op was found
for (update, key) in updates.into_iter().zip(keys) {
if update.is_some() {
data.remove(&key);
}
}
db._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake operation to batch them all at once
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp: NTP64(0),
data: CRDTOperationData::Create(
data.iter()
.map(|(k, (data, _))| (k.clone(), data.clone()))
.collect(),
),
})
.ok_or(Error::InvalidModelId(model))?
.exec(&db)
.await?;
// need to only apply ops that haven't been filtered out
data.into_iter()
.map(|(field, (value, timestamp))| {
let record_id = record_id.clone();
let db = &db;
async move {
write_crdt_op_to_db(
&CRDTOperation {
instance,
model,
record_id,
timestamp,
data: CRDTOperationData::Update { field, value },
},
db,
)
.await
}
})
.collect::<Vec<_>>()
.try_join()
.await
.map(|_| ())
})
.await
}
async fn handle_crdt_create_and_updates(
db: &PrismaClient,
instance: Uuid,
model: u16,
record_id: rmpv::Value,
ops: Vec<CompressedCRDTOperation>,
timestamp: NTP64,
) -> Result<(), Error> {
let mut data = BTreeMap::new();
let mut applied_ops = vec![];
// search for all Updates until a Create is found
for op in ops.iter().rev() {
match &op.data {
CRDTOperationData::Delete => unreachable!("Delete can't exist here!"),
CRDTOperationData::Create(create_data) => {
for (k, v) in create_data {
data.entry(k).or_insert(v);
}
applied_ops.push(op);
break;
}
CRDTOperationData::Update { field, value } => {
applied_ops.push(op);
data.insert(field, value);
}
}
}
db._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake a create with a bunch of data rather than individual insert
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Create(
data.into_iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect(),
),
})
.ok_or(Error::InvalidModelId(model))?
.exec(&db)
.await?;
applied_ops
.into_iter()
.map(|op| {
let record_id = record_id.clone();
let db = &db;
async move {
let operation = CRDTOperation {
instance,
model,
record_id,
timestamp: op.timestamp,
data: op.data.clone(),
};
write_crdt_op_to_db(&operation, db).await
}
})
.collect::<Vec<_>>()
.try_join()
.await
.map(|_| ())
})
.await
}
async fn handle_crdt_deletion(
db: &PrismaClient,
instance: Uuid,
model: u16,
record_id: rmpv::Value,
delete_op: &CompressedCRDTOperation,
) -> Result<(), Error> {
// deletes are the be all and end all, no need to check anything
let op = CRDTOperation {
instance,
model,
record_id,
timestamp: delete_op.timestamp,
data: CRDTOperationData::Delete,
};
db._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
ModelSyncData::from_op(op.clone())
.ok_or(Error::InvalidModelId(model))?
.exec(&db)
.await?;
write_crdt_op_to_db(&op, &db).await
})
.await
}
impl Deref for Actor {
type Target = SharedState;
@@ -444,8 +572,8 @@
}
pub struct Handler {
pub event_tx: mpsc::Sender<Event>,
pub req_rx: Arc<Mutex<mpsc::Receiver<Request>>>,
pub event_tx: chan::Sender<Event>,
pub req_rx: chan::Receiver<Request>,
}
#[derive(Debug)]
@@ -466,12 +594,13 @@
mod test {
use std::{sync::atomic::AtomicBool, time::Duration};
use tokio::sync::Notify;
use uhlc::HLCBuilder;
use super::*;
async fn new_actor() -> (Handler, Arc<SharedState>) {
let instance = uuid::Uuid::new_v4();
let instance = Uuid::new_v4();
let shared = Arc::new(SharedState {
db: sd_prisma::test_db().await,
instance,
@ -480,14 +609,14 @@ mod test {
NonZeroU128::new(instance.to_u128_le()).expect("Non zero id"),
))
.build(),
timestamps: Default::default(),
timestamps: Arc::default(),
emit_messages_flag: Arc::new(AtomicBool::new(true)),
active: Default::default(),
active_notify: Default::default(),
actors: Default::default(),
active: AtomicBool::default(),
active_notify: Notify::default(),
actors: Arc::default(),
});
(Actor::declare(shared.clone()).await, shared)
(Actor::declare(Arc::clone(&shared)).await, shared)
}
/// If messages tx is dropped, actor should reset and assume no further messages
@ -497,11 +626,9 @@ mod test {
let (ingest, _) = new_actor().await;
for _ in 0..10 {
let mut rx = ingest.req_rx.lock().await;
ingest.event_tx.send(Event::Notification).await.unwrap();
let Some(Request::Messages { .. }) = rx.recv().await else {
let Ok(Request::Messages { .. }) = ingest.req_rx.recv().await else {
panic!("bruh")
};


@ -1,10 +1,31 @@
#![allow(clippy::unwrap_used, clippy::panic)] // TODO: Brendan remove this once you've got error handling here
mod actor;
pub mod backfill;
mod db_operation;
pub mod ingest;
mod manager;
#![warn(
clippy::all,
clippy::pedantic,
clippy::correctness,
clippy::perf,
clippy::style,
clippy::suspicious,
clippy::complexity,
clippy::nursery,
clippy::unwrap_used,
unused_qualifications,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unused_allocation,
clippy::unnecessary_cast,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::dbg_macro,
clippy::deprecated_cfg_attr,
clippy::separated_literal_suffix,
deprecated
)]
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
use sd_prisma::prisma::{crdt_operation, instance, PrismaClient};
use sd_sync::CRDTOperation;
@ -14,6 +35,15 @@ use std::{
sync::{atomic::AtomicBool, Arc},
};
use tokio::sync::{Notify, RwLock};
use uuid::Uuid;
mod actor;
pub mod backfill;
mod db_operation;
pub mod ingest;
mod manager;
pub use ingest::*;
pub use manager::*;
pub use uhlc::NTP64;
@ -24,44 +54,83 @@ pub enum SyncMessage {
Created,
}
pub type Timestamps = Arc<tokio::sync::RwLock<HashMap<uuid::Uuid, NTP64>>>;
pub type Timestamps = Arc<RwLock<HashMap<Uuid, NTP64>>>;
pub struct SharedState {
pub db: Arc<PrismaClient>,
pub emit_messages_flag: Arc<AtomicBool>,
pub instance: uuid::Uuid,
pub instance: Uuid,
pub timestamps: Timestamps,
pub clock: uhlc::HLC,
pub active: AtomicBool,
pub active_notify: tokio::sync::Notify,
pub active_notify: Notify,
pub actors: Arc<sd_actors::Actors>,
}
#[must_use]
pub fn crdt_op_db(op: &CRDTOperation) -> crdt_operation::Create {
crdt_operation::Create {
timestamp: op.timestamp.0 as i64,
instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()),
kind: op.kind().to_string(),
data: rmp_serde::to_vec(&op.data).unwrap(),
model: op.model as i32,
record_id: rmp_serde::to_vec(&op.record_id).unwrap(),
_params: vec![],
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("serialization error: {0}")]
Serialization(#[from] rmp_serde::encode::Error),
#[error("deserialization error: {0}")]
Deserialization(#[from] rmp_serde::decode::Error),
#[error("database error: {0}")]
Database(#[from] prisma_client_rust::QueryError),
#[error("invalid model id: {0}")]
InvalidModelId(u16),
}
impl From<Error> for rspc::Error {
fn from(e: Error) -> Self {
match e {
Error::Database(e) => e.into(),
Error::InvalidModelId(id) => Self::new(
rspc::ErrorCode::BadRequest,
format!("Invalid model id <id={id}>"),
),
_ => Self::with_cause(
rspc::ErrorCode::InternalServerError,
"Internal sync error".to_string(),
e,
),
}
}
}
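/// Converts a [`CRDTOperation`] into a Prisma create for the `crdt_operation`
/// table, serializing `data` and `record_id` with MessagePack.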
#[must_use]
pub fn crdt_op_db(op: &CRDTOperation) -> Result<crdt_operation::Create, Error> {
Ok(crdt_operation::Create {
timestamp: {
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
op.timestamp.as_u64() as i64
}
},
instance: instance::pub_id::equals(op.instance.as_bytes().to_vec()),
kind: op.kind().to_string(),
data: rmp_serde::to_vec(&op.data)?,
model: i32::from(op.model),
record_id: rmp_serde::to_vec(&op.record_id)?,
_params: vec![],
})
}
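/// Like [`crdt_op_db`], but references the instance by its local integer id
/// instead of looking it up by `pub_id`.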
pub fn crdt_op_unchecked_db(
op: &CRDTOperation,
instance_id: i32,
) -> crdt_operation::CreateUnchecked {
crdt_operation::CreateUnchecked {
timestamp: op.timestamp.0 as i64,
) -> Result<crdt_operation::CreateUnchecked, Error> {
Ok(crdt_operation::CreateUnchecked {
timestamp: {
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
op.timestamp.as_u64() as i64
}
},
instance_id,
kind: op.kind().to_string(),
data: rmp_serde::to_vec(&op.data).unwrap(),
model: op.model as i32,
record_id: rmp_serde::to_vec(&op.record_id).unwrap(),
data: rmp_serde::to_vec(&op.data)?,
model: i32::from(op.model),
record_id: rmp_serde::to_vec(&op.record_id)?,
_params: vec![],
}
})
}


@ -1,13 +1,10 @@
use crate::{crdt_op_db, db_operation::*, ingest, SharedState, SyncMessage, NTP64};
use sd_prisma::prisma::{cloud_crdt_operation, crdt_operation, instance, PrismaClient, SortOrder};
use sd_sync::{CRDTOperation, OperationFactory};
use sd_utils::uuid_to_bytes;
use sd_utils::{from_bytes_to_uuid, uuid_to_bytes};
use tracing::warn;
use std::{
cmp::Ordering,
collections::HashMap,
fmt,
cmp, fmt,
num::NonZeroU128,
ops::Deref,
sync::{
@ -16,16 +13,23 @@ use std::{
},
};
use tokio::sync::{broadcast, RwLock};
use prisma_client_rust::{and, operator::or};
use tokio::sync::{broadcast, Mutex, Notify, RwLock};
use uhlc::{HLCBuilder, HLC};
use uuid::Uuid;
use super::{
crdt_op_db,
db_operation::{cloud_crdt_with_instance, crdt_with_instance},
ingest, Error, SharedState, SyncMessage, NTP64,
};
/// Wrapper that spawns the ingest actor and provides utilities for reading and writing sync operations.
pub struct Manager {
pub tx: broadcast::Sender<SyncMessage>,
pub ingest: ingest::Handler,
pub shared: Arc<SharedState>,
pub timestamp_lock: tokio::sync::Semaphore,
pub timestamp_lock: Mutex<()>,
}
impl fmt::Debug for Manager {
@ -40,74 +44,122 @@ pub struct GetOpsArgs {
pub count: u32,
}
pub struct New {
pub manager: Manager,
pub rx: broadcast::Receiver<SyncMessage>,
}
impl Manager {
#[allow(clippy::new_ret_no_self)]
/// Creates a new manager that can be used to read and write CRDT operations.
/// Sync messages are received on the returned [`broadcast::Receiver<SyncMessage>`].
pub async fn new(
db: &Arc<PrismaClient>,
instance: Uuid,
emit_messages_flag: &Arc<AtomicBool>,
timestamps: HashMap<Uuid, NTP64>,
actors: &Arc<sd_actors::Actors>,
) -> New {
db: Arc<PrismaClient>,
current_instance_uuid: Uuid,
emit_messages_flag: Arc<AtomicBool>,
actors: Arc<sd_actors::Actors>,
) -> Result<(Self, broadcast::Receiver<SyncMessage>), Error> {
let existing_instances = db.instance().find_many(vec![]).exec().await?;
Self::with_existing_instances(
db,
current_instance_uuid,
emit_messages_flag,
&existing_instances,
actors,
)
.await
}
/// Creates a new manager for reading and writing CRDT operations, seeding per-instance timestamps from the given existing instances.
/// Sync messages are received on the returned [`broadcast::Receiver<SyncMessage>`].
///
/// # Panics
/// Panics if `current_instance_uuid` is the nil (all-zero) UUID.
pub async fn with_existing_instances(
db: Arc<PrismaClient>,
current_instance_uuid: Uuid,
emit_messages_flag: Arc<AtomicBool>,
existing_instances: &[instance::Data],
actors: Arc<sd_actors::Actors>,
) -> Result<(Self, broadcast::Receiver<SyncMessage>), Error> {
let timestamps = db
._batch(
existing_instances
.iter()
.map(|i| {
db.crdt_operation()
.find_first(vec![crdt_operation::instance::is(vec![
instance::id::equals(i.id),
])])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
})
.collect::<Vec<_>>(),
)
.await?
.into_iter()
.zip(existing_instances)
.map(|(op, i)| {
(
from_bytes_to_uuid(&i.pub_id),
#[allow(clippy::cast_sign_loss)]
// SAFETY: we had to store using i64 due to SQLite limitations
NTP64(op.map(|o| o.timestamp).unwrap_or_default() as u64),
)
})
.collect();
let (tx, rx) = broadcast::channel(64);
let clock = HLCBuilder::new()
.with_id(uhlc::ID::from(
NonZeroU128::new(instance.to_u128_le()).expect("Non zero id"),
NonZeroU128::new(current_instance_uuid.to_u128_le()).expect("Non zero id"),
))
.build();
let shared = Arc::new(SharedState {
db: db.clone(),
instance,
db,
instance: current_instance_uuid,
clock,
timestamps: Arc::new(RwLock::new(timestamps)),
emit_messages_flag: emit_messages_flag.clone(),
active: Default::default(),
active_notify: Default::default(),
actors: actors.clone(),
emit_messages_flag,
active: AtomicBool::default(),
active_notify: Notify::default(),
actors,
});
let ingest = ingest::Actor::declare(shared.clone()).await;
New {
manager: Self {
Ok((
Self {
tx,
ingest,
shared,
timestamp_lock: tokio::sync::Semaphore::new(1),
timestamp_lock: Mutex::default(),
},
rx,
}
))
}
pub fn subscribe(&self) -> broadcast::Receiver<SyncMessage> {
self.tx.subscribe()
}
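/// Atomically writes a batch of CRDT operations together with their paired
/// database mutations; while sync message emission is enabled, each op is
/// stamped with a fresh HLC timestamp under `timestamp_lock`.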
pub async fn write_ops<'item, I: prisma_client_rust::BatchItem<'item>>(
pub async fn write_ops<'item, Q>(
&self,
tx: &PrismaClient,
(mut ops, queries): (Vec<CRDTOperation>, I),
) -> prisma_client_rust::Result<<I as prisma_client_rust::BatchItemParent>::ReturnValue> {
(mut ops, queries): (Vec<CRDTOperation>, Q),
) -> Result<Q::ReturnValue, Error>
where
Q: prisma_client_rust::BatchItem<'item, ReturnValue: Send> + Send,
{
let ret = if self.emit_messages_flag.load(atomic::Ordering::Relaxed) {
let lock = self.timestamp_lock.acquire().await;
let lock = self.timestamp_lock.lock().await;
ops.iter_mut().for_each(|op| {
for op in &mut ops {
op.timestamp = *self.get_clock().new_timestamp().get_time();
});
}
let (res, _) = tx
._batch((
queries,
ops.iter()
.map(|op| crdt_op_db(op).to_query(tx))
.collect::<Vec<_>>(),
.map(|op| crdt_op_db(op).map(|q| q.to_query(tx)))
.collect::<Result<Vec<_>, _>>()?,
))
.await?;
@ -119,7 +171,9 @@ impl Manager {
.insert(self.instance, last.timestamp);
}
self.tx.send(SyncMessage::Created).ok();
if self.tx.send(SyncMessage::Created).is_err() {
warn!("failed to send created message on `write_ops`");
}
drop(lock);
@ -131,21 +185,25 @@ impl Manager {
Ok(ret)
}
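/// Single-operation variant of [`write_ops`]: writes one CRDT operation and
/// its paired database mutation in a single batch.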
#[allow(unused_variables)]
pub async fn write_op<'item, Q: prisma_client_rust::BatchItem<'item>>(
pub async fn write_op<'item, Q>(
&self,
tx: &PrismaClient,
mut op: CRDTOperation,
query: Q,
) -> prisma_client_rust::Result<<Q as prisma_client_rust::BatchItemParent>::ReturnValue> {
) -> Result<Q::ReturnValue, Error>
where
Q: prisma_client_rust::BatchItem<'item, ReturnValue: Send> + Send,
{
let ret = if self.emit_messages_flag.load(atomic::Ordering::Relaxed) {
let lock = self.timestamp_lock.acquire().await;
let lock = self.timestamp_lock.lock().await;
op.timestamp = *self.get_clock().new_timestamp().get_time();
let ret = tx._batch((crdt_op_db(&op).to_query(tx), query)).await?.1;
let ret = tx._batch((crdt_op_db(&op)?.to_query(tx), query)).await?.1;
self.tx.send(SyncMessage::Created).ok();
if self.tx.send(SyncMessage::Created).is_err() {
warn!("failed to send created message on `write_op`");
}
drop(lock);
@ -168,143 +226,121 @@ impl Manager {
count: u32,
instance_uuid: Uuid,
timestamp: NTP64,
) -> prisma_client_rust::Result<Vec<CRDTOperation>> {
let db = &self.db;
Ok(db
) -> Result<Vec<CRDTOperation>, Error> {
self.db
.crdt_operation()
.find_many(vec![
crdt_operation::instance::is(vec![instance::pub_id::equals(uuid_to_bytes(
&instance_uuid,
))]),
#[allow(clippy::cast_possible_wrap)]
crdt_operation::timestamp::gt(timestamp.as_u64() as i64),
])
.take(i64::from(count))
.order_by(crdt_operation::timestamp::order(SortOrder::Asc))
.include(crdt_include::include())
.include(crdt_with_instance::include())
.exec()
.await?
.into_iter()
.map(|o| o.into_operation())
.collect())
.map(crdt_with_instance::Data::into_operation)
.collect()
}
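/// Returns up to `args.count` operations newer than the supplied per-instance
/// clocks (plus all ops from instances not listed), ordered by timestamp with
/// ties broken by instance.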
pub async fn get_ops(
&self,
args: GetOpsArgs,
) -> prisma_client_rust::Result<Vec<CRDTOperation>> {
let db = &self.db;
macro_rules! db_args {
($args:ident, $op:ident) => {
vec![prisma_client_rust::operator::or(
$args
.clocks
.iter()
.map(|(instance_id, timestamp)| {
prisma_client_rust::and![
$op::instance::is(vec![instance::pub_id::equals(uuid_to_bytes(
instance_id
))]),
$op::timestamp::gt(timestamp.as_u64() as i64)
]
})
.chain([
$op::instance::is_not(vec![
instance::pub_id::in_vec(
$args
.clocks
.iter()
.map(|(instance_id, _)| {
uuid_to_bytes(instance_id)
})
.collect()
)
])
])
.collect(),
)]
};
}
let mut ops = db
pub async fn get_ops(&self, args: GetOpsArgs) -> Result<Vec<CRDTOperation>, Error> {
let mut ops = self
.db
.crdt_operation()
.find_many(db_args!(args, crdt_operation))
.find_many(vec![or(args
.clocks
.iter()
.map(|(instance_id, timestamp)| {
and![
crdt_operation::instance::is(vec![instance::pub_id::equals(
uuid_to_bytes(instance_id)
)]),
crdt_operation::timestamp::gt({
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
timestamp.as_u64() as i64
}
})
]
})
.chain([crdt_operation::instance::is_not(vec![
instance::pub_id::in_vec(
args.clocks
.iter()
.map(|(instance_id, _)| uuid_to_bytes(instance_id))
.collect(),
),
])])
.collect())])
.take(i64::from(args.count))
.order_by(crdt_operation::timestamp::order(SortOrder::Asc))
.include(crdt_include::include())
.include(crdt_with_instance::include())
.exec()
.await?;
ops.sort_by(|a, b| match a.timestamp().cmp(&b.timestamp()) {
Ordering::Equal => a.instance().cmp(&b.instance()),
cmp::Ordering::Equal => a.instance().cmp(&b.instance()),
o => o,
});
Ok(ops
.into_iter()
ops.into_iter()
.take(args.count as usize)
.map(|o| o.into_operation())
.collect())
.map(crdt_with_instance::Data::into_operation)
.collect()
}
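/// Cloud variant of [`get_ops`]: reads from the `cloud_crdt_operation` table
/// and returns each row's database id alongside the operation.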
pub async fn get_cloud_ops(
&self,
args: GetOpsArgs,
) -> prisma_client_rust::Result<Vec<(i32, CRDTOperation)>> {
let db = &self.db;
macro_rules! db_args {
($args:ident, $op:ident) => {
vec![prisma_client_rust::operator::or(
$args
.clocks
.iter()
.map(|(instance_id, timestamp)| {
prisma_client_rust::and![
$op::instance::is(vec![instance::pub_id::equals(uuid_to_bytes(
instance_id
))]),
$op::timestamp::gt(timestamp.as_u64() as i64)
]
})
.chain([
$op::instance::is_not(vec![
instance::pub_id::in_vec(
$args
.clocks
.iter()
.map(|(instance_id, _)| {
uuid_to_bytes(instance_id)
})
.collect()
)
])
])
.collect(),
)]
};
}
let mut ops = db
) -> Result<Vec<(i32, CRDTOperation)>, Error> {
let mut ops = self
.db
.cloud_crdt_operation()
.find_many(db_args!(args, cloud_crdt_operation))
.find_many(vec![or(args
.clocks
.iter()
.map(|(instance_id, timestamp)| {
and![
cloud_crdt_operation::instance::is(vec![instance::pub_id::equals(
uuid_to_bytes(instance_id)
)]),
cloud_crdt_operation::timestamp::gt({
#[allow(clippy::cast_possible_wrap)]
// SAFETY: we had to store using i64 due to SQLite limitations
{
timestamp.as_u64() as i64
}
})
]
})
.chain([cloud_crdt_operation::instance::is_not(vec![
instance::pub_id::in_vec(
args.clocks
.iter()
.map(|(instance_id, _)| uuid_to_bytes(instance_id))
.collect(),
),
])])
.collect())])
.take(i64::from(args.count))
.order_by(cloud_crdt_operation::timestamp::order(SortOrder::Asc))
.include(cloud_crdt_include::include())
.include(cloud_crdt_with_instance::include())
.exec()
.await?;
ops.sort_by(|a, b| match a.timestamp().cmp(&b.timestamp()) {
Ordering::Equal => a.instance().cmp(&b.instance()),
cmp::Ordering::Equal => a.instance().cmp(&b.instance()),
o => o,
});
Ok(ops
.into_iter()
ops.into_iter()
.take(args.count as usize)
.map(|o| o.into_operation())
.collect())
.map(cloud_crdt_with_instance::Data::into_operation)
.collect()
}
}


@ -2,27 +2,27 @@ mod mock_instance;
use sd_core_sync::*;
use sd_prisma::{prisma, prisma_sync};
use sd_prisma::{prisma::location, prisma_sync};
use sd_sync::*;
use sd_utils::{msgpack, uuid_to_bytes};
use mock_instance::Instance;
use tracing::info;
use tracing_test::traced_test;
use uuid::Uuid;
async fn write_test_location(
instance: &Instance,
) -> Result<prisma::location::Data, Box<dyn std::error::Error>> {
Ok(instance
const MOCK_LOCATION_NAME: &str = "Location 0";
const MOCK_LOCATION_PATH: &str = "/User/Anon/Documents";
async fn write_test_location(instance: &Instance) -> location::Data {
let location_pub_id = Uuid::new_v4();
let location = instance
.sync
.write_ops(&instance.db, {
let id = Uuid::new_v4();
let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [
sync_db_entry!("Location 0".to_string(), prisma::location::name),
sync_db_entry!(
"/User/Brendan/Documents".to_string(),
prisma::location::path
),
sync_db_entry!(MOCK_LOCATION_NAME, location::name),
sync_db_entry!(MOCK_LOCATION_PATH, location::path),
]
.into_iter()
.unzip();
@ -30,21 +30,60 @@ async fn write_test_location(
(
instance.sync.shared_create(
prisma_sync::location::SyncId {
pub_id: uuid_to_bytes(&id),
pub_id: uuid_to_bytes(&location_pub_id),
},
sync_ops,
),
instance.db.location().create(uuid_to_bytes(&id), db_ops),
instance
.db
.location()
.create(uuid_to_bytes(&location_pub_id), db_ops),
)
})
.await?)
.await
.expect("failed to create mock location");
instance
.sync
.write_ops(&instance.db, {
let (sync_ops, db_ops): (Vec<_>, Vec<_>) = [
sync_db_entry!(1024, location::total_capacity),
sync_db_entry!(512, location::available_capacity),
]
.into_iter()
.unzip();
(
sync_ops
.into_iter()
.map(|(k, v)| {
instance.sync.shared_update(
prisma_sync::location::SyncId {
pub_id: uuid_to_bytes(&location_pub_id),
},
k,
v,
)
})
.collect::<Vec<_>>(),
instance
.db
.location()
.update(location::id::equals(location.id), db_ops),
)
})
.await
.expect("failed to create mock location");
location
}
#[tokio::test]
#[traced_test]
async fn writes_operations_and_rows_together() -> Result<(), Box<dyn std::error::Error>> {
let instance = Instance::new(Uuid::new_v4()).await;
write_test_location(&instance).await?;
write_test_location(&instance).await;
let operations = instance
.db
@ -57,27 +96,46 @@ async fn writes_operations_and_rows_together() -> Result<(), Box<dyn std::error:
assert_eq!(operations.len(), 3);
assert_eq!(operations[0].model, prisma_sync::location::MODEL_ID as i32);
let out = instance
.sync
.get_ops(GetOpsArgs {
clocks: vec![],
count: 100,
})
.await?;
assert_eq!(out.len(), 3);
let locations = instance.db.location().find_many(vec![]).exec().await?;
assert_eq!(locations.len(), 1);
let location = locations.first().unwrap();
assert_eq!(location.name, Some("Location 0".to_string()));
assert_eq!(location.path, Some("/User/Brendan/Documents".to_string()));
assert_eq!(location.name.as_deref(), Some(MOCK_LOCATION_NAME));
assert_eq!(location.path.as_deref(), Some(MOCK_LOCATION_PATH));
Ok(())
}
#[tokio::test]
#[traced_test]
async fn operations_send_and_ingest() -> Result<(), Box<dyn std::error::Error>> {
let instance1 = Instance::new(Uuid::new_v4()).await;
let instance2 = Instance::new(Uuid::new_v4()).await;
let mut instance2_sync_rx = instance2.sync_rx.resubscribe();
info!("Created instances!");
Instance::pair(&instance1, &instance2).await;
write_test_location(&instance1).await?;
info!("Paired instances!");
write_test_location(&instance1).await;
info!("Created mock location!");
assert!(matches!(
instance2.sync_rx.resubscribe().recv().await?,
instance2_sync_rx.recv().await?,
SyncMessage::Ingested
));
@ -89,6 +147,11 @@ async fn operations_send_and_ingest() -> Result<(), Box<dyn std::error::Error>>
})
.await?;
assert_locations_equality(
&instance1.db.location().find_many(vec![]).exec().await?[0],
&instance2.db.location().find_many(vec![]).exec().await?[0],
);
assert_eq!(out.len(), 3);
instance1.teardown().await;
@ -102,12 +165,14 @@ async fn no_update_after_delete() -> Result<(), Box<dyn std::error::Error>> {
let instance1 = Instance::new(Uuid::new_v4()).await;
let instance2 = Instance::new(Uuid::new_v4()).await;
let mut instance2_sync_rx = instance2.sync_rx.resubscribe();
Instance::pair(&instance1, &instance2).await;
let location = write_test_location(&instance1).await?;
let location = write_test_location(&instance1).await;
assert!(matches!(
instance2.sync_rx.resubscribe().recv().await?,
instance2_sync_rx.recv().await?,
SyncMessage::Ingested
));
@ -140,8 +205,7 @@ async fn no_update_after_delete() -> Result<(), Box<dyn std::error::Error>> {
),
instance1.db.location().find_many(vec![]),
)
.await
.ok();
.await?;
// one spare update operation that actually gets ignored by instance 2
assert_eq!(instance1.db.crdt_operation().count(vec![]).exec().await?, 5);
@ -156,3 +220,28 @@ async fn no_update_after_delete() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
fn assert_locations_equality(l1: &location::Data, l2: &location::Data) {
assert_eq!(l1.pub_id, l2.pub_id, "pub id");
assert_eq!(l1.name, l2.name, "name");
assert_eq!(l1.path, l2.path, "path");
assert_eq!(l1.total_capacity, l2.total_capacity, "total capacity");
assert_eq!(
l1.available_capacity, l2.available_capacity,
"available capacity"
);
assert_eq!(l1.size_in_bytes, l2.size_in_bytes, "size in bytes");
assert_eq!(l1.is_archived, l2.is_archived, "is archived");
assert_eq!(
l1.generate_preview_media, l2.generate_preview_media,
"generate preview media"
);
assert_eq!(
l1.sync_preview_media, l2.sync_preview_media,
"sync preview media"
);
assert_eq!(l1.hidden, l2.hidden, "hidden");
assert_eq!(l1.date_created, l2.date_created, "date created");
assert_eq!(l1.scan_state, l2.scan_state, "scan state");
assert_eq!(l1.instance_id, l2.instance_id, "instance id");
}


@ -1,11 +1,14 @@
use sd_core_sync::*;
use sd_prisma::prisma::{self};
use sd_prisma::prisma;
use sd_sync::CompressedCRDTOperations;
use sd_utils::uuid_to_bytes;
use prisma_client_rust::chrono::Utc;
use std::sync::{atomic::AtomicBool, Arc};
use tokio::sync::broadcast;
use prisma_client_rust::chrono::Utc;
use tokio::{fs, spawn, sync::broadcast};
use tracing::{info, instrument, warn, Instrument};
use uuid::Uuid;
fn db_path(id: Uuid) -> String {
@ -47,29 +50,30 @@ impl Instance {
.await
.unwrap();
let sync = sd_core_sync::Manager::new(
&db,
let (sync, sync_rx) = sd_core_sync::Manager::new(
Arc::clone(&db),
id,
&Arc::new(AtomicBool::new(true)),
Arc::new(AtomicBool::new(true)),
Default::default(),
&Default::default(),
)
.await;
.await
.expect("failed to create sync manager");
Arc::new(Self {
id,
db,
sync: Arc::new(sync.manager),
sync_rx: Arc::new(sync.rx),
sync: Arc::new(sync),
sync_rx: Arc::new(sync_rx),
})
}
pub async fn teardown(&self) {
tokio::fs::remove_file(db_path(self.id)).await.unwrap();
fs::remove_file(db_path(self.id)).await.unwrap();
}
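/// Cross-wires two instances so sync flows both ways in-process: each side's
/// `Created` messages trigger a notification on the peer's ingest actor, and
/// each `Messages` request is answered with ops fetched from the peer.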
pub async fn pair(left: &Arc<Self>, right: &Arc<Self>) {
async fn half(left: &Arc<Instance>, right: &Arc<Instance>) {
pub async fn pair(instance1: &Arc<Self>, instance2: &Arc<Self>) {
#[instrument(skip(left, right))]
async fn half(left: &Arc<Instance>, right: &Arc<Instance>, context: &'static str) {
left.db
.instance()
.create(
@ -84,34 +88,38 @@ impl Instance {
.await
.unwrap();
tokio::spawn({
let mut sync_rx_1 = left.sync_rx.resubscribe();
let instance2 = right.clone();
spawn({
let mut sync_rx_left = left.sync_rx.resubscribe();
let right = Arc::clone(right);
async move {
while let Ok(msg) = sync_rx_1.recv().await {
while let Ok(msg) = sync_rx_left.recv().await {
info!(?msg, "sync_rx_left received message");
if matches!(msg, SyncMessage::Created) {
instance2
right
.sync
.ingest
.event_tx
.send(ingest::Event::Notification)
.await
.unwrap();
info!("sent notification to instance 2");
}
}
}
.in_current_span()
});
tokio::spawn({
let instance1 = left.clone();
let instance2 = right.clone();
spawn({
let left = Arc::clone(left);
let right = Arc::clone(right);
async move {
while let Some(msg) = instance2.sync.ingest.req_rx.lock().await.recv().await {
while let Ok(msg) = right.sync.ingest.req_rx.recv().await {
info!(?msg, "right instance received request");
match msg {
ingest::Request::Messages { timestamps, .. } => {
let messages = instance1
ingest::Request::Messages { timestamps, tx } => {
let messages = left
.sync
.get_ops(GetOpsArgs {
clocks: timestamps,
@ -120,30 +128,34 @@ impl Instance {
.await
.unwrap();
let ingest = &instance2.sync.ingest;
let ingest = &right.sync.ingest;
ingest
.event_tx
.send(ingest::Event::Messages(ingest::MessagesEvent {
messages: CompressedCRDTOperations::new(messages),
has_more: false,
instance_id: instance1.id,
instance_id: left.id,
wait_tx: None,
}))
.await
.unwrap();
if tx.send(()).is_err() {
warn!("failed to send ack to instance 1");
}
}
ingest::Request::FinishedIngesting => {
right.sync.tx.send(SyncMessage::Ingested).unwrap();
}
// ingest::Request::Ingested => {
// instance2.sync.tx.send(SyncMessage::Ingested).ok();
// }
ingest::Request::FinishedIngesting => {}
}
}
}
.in_current_span()
});
}
half(left, right).await;
half(right, left).await;
half(instance1, instance2, "instance1 -> instance2").await;
half(instance2, instance1, "instance2 -> instance1").await;
}
}


@ -28,6 +28,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
pub p2p_discovery: Option<P2PDiscoveryState>,
pub p2p_remote_access: Option<bool>,
pub p2p_manual_peers: Option<HashSet<String>>,
#[cfg(feature = "ai")]
pub image_labeler_version: Option<String>,
}
R.mutation(|node, args: ChangeNodeNameArgs| async move {
@ -115,30 +116,31 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
if let Some(model) = new_model {
let version = model.version().to_string();
tokio::spawn(async move {
let notification =
if let Some(image_labeller) = node.old_image_labeller.as_ref() {
if let Err(e) = image_labeller.change_model(model).await {
NotificationData {
title: String::from(
"Failed to change image detection model",
),
content: format!("Error: {e}"),
kind: NotificationKind::Error,
}
} else {
NotificationData {
title: String::from("Model download completed"),
content: format!("Sucessfuly loaded model: {version}"),
kind: NotificationKind::Success,
}
let notification = if let Some(image_labeller) =
node.old_image_labeller.as_ref()
{
if let Err(e) = image_labeller.change_model(model).await {
NotificationData {
title: String::from(
"Failed to change image detection model",
),
content: format!("Error: {e}"),
kind: NotificationKind::Error,
}
} else {
NotificationData {
title: String::from("Model download completed"),
content: format!("Successfully loaded model: {version}"),
kind: NotificationKind::Success,
}
}
} else {
NotificationData {
title: String::from("Failed to change image detection model"),
content: "The AI system is disabled due to a previous error. Contact support for help.".to_string(),
kind: NotificationKind::Error,
}
};
};
node.emit_notification(notification, None).await;
});
@ -184,7 +186,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.procedure("updateThumbnailerPreferences", {
#[derive(Deserialize, Type)]
pub struct UpdateThumbnailerPreferences {
pub background_processing_percentage: u8, // 0-100
// pub background_processing_percentage: u8, // 0-100
}
R.mutation(
|node, UpdateThumbnailerPreferences { .. }: UpdateThumbnailerPreferences| async move {


@ -152,7 +152,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
|(_, library), (id, args): (saved_search::id::Type, Args)| async move {
let Library { db, sync, .. } = library.as_ref();
let updated_at = Utc::now().into();
let updated_at = Utc::now();
let search = db
.saved_search()


@ -51,7 +51,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
&library.sync,
library.config().await.instance_id,
)
.await;
.await?;
node.libraries
.edit(


@ -1,14 +1,20 @@
use crate::cloud::sync::err_break;
use sd_prisma::prisma::cloud_crdt_operation;
use sd_sync::CompressedCRDTOperations;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
use std::{
pin::pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use futures::StreamExt;
use tokio::sync::Notify;
use tracing::debug;
use crate::cloud::sync::err_break;
// Responsible for taking sync operations received from the cloud,
// and applying them to the local database via the sync system's ingest actor.
@ -23,7 +29,7 @@ pub async fn run_actor(
state_notify.notify_waiters();
{
let mut rx = sync.ingest.req_rx.lock().await;
let mut rx = pin!(sync.ingest.req_rx.clone());
if sync
.ingest
@ -32,9 +38,13 @@ pub async fn run_actor(
.await
.is_ok()
{
while let Some(req) = rx.recv().await {
while let Some(req) = rx.next().await {
const OPS_PER_REQUEST: u32 = 1000;
// FIXME: If the operation count is an exact multiple of OPS_PER_REQUEST,
// this misbehaves: we send `has_more` as true even though there are no
// more operations to send.
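// A minimal fix sketch (an assumption, not part of this change): fetch one
// extra operation and use its presence to compute `has_more` exactly, e.g.
//
//     let mut ops = sync
//         .get_cloud_ops(GetOpsArgs { clocks: timestamps, count: OPS_PER_REQUEST + 1 })
//         .await
//         .unwrap();
//     let has_more = ops.len() > OPS_PER_REQUEST as usize;
//     ops.truncate(OPS_PER_REQUEST as usize);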
use sd_core_sync::*;
let timestamps = match req {


@ -42,7 +42,10 @@ pub async fn declare_actors(
let active = state.send_active.clone();
let active_notifier = state.notifier.clone();
move || send::run_actor(library_id, sync, node, active, active_notifier)
move |_stop| {
// FIXME: Properly use the stop actor
send::run_actor(library_id, sync, node, active, active_notifier)
}
},
autorun,
)
@ -58,7 +61,8 @@ pub async fn declare_actors(
let active_notifier = state.notifier.clone();
let active = state.receive_active.clone();
move || {
move |_stop| {
// FIXME: Properly use the stop actor
receive::run_actor(
node.libraries.clone(),
db.clone(),
@ -83,7 +87,10 @@ pub async fn declare_actors(
let active = state.ingest_active.clone();
let active_notifier = state.notifier.clone();
move || ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier)
move |_stop| {
// FIXME: Properly use the stop actor
ingest::run_actor(sync.clone(), ingest_notify, active, active_notifier)
}
},
autorun,
)


@ -31,7 +31,7 @@ use tracing_appender::{
non_blocking::{NonBlocking, WorkerGuard},
rolling::{RollingFileAppender, Rotation},
};
use tracing_subscriber::{filter::FromEnvError, prelude::*, EnvFilter};
use tracing_subscriber::{filter::FromEnvError, prelude::*, registry, EnvFilter};
pub mod api;
mod cloud;
@ -254,12 +254,15 @@ impl Node {
);
}
tracing_subscriber::registry()
let registry = registry();
let registry = registry
.with(
tracing_subscriber::fmt::layer()
.with_file(true)
.with_line_number(true)
.with_ansi(false)
.with_target(true)
.with_writer(logfile)
.with_filter(EnvFilter::from_default_env()),
)
@ -269,8 +272,12 @@ impl Node {
.with_line_number(true)
.with_writer(std::io::stdout)
.with_filter(EnvFilter::from_default_env()),
)
.init();
);
#[cfg(target_os = "android")]
let registry = registry.with(tracing_android::layer("com.spacedrive.app").unwrap());
registry.init();
std::panic::set_hook(Box::new(move |panic| {
use std::backtrace::{Backtrace, BacktraceStatus};


@ -46,6 +46,8 @@ pub enum LibraryManagerError {
FileIO(#[from] FileIOError),
#[error(transparent)]
LibraryConfig(#[from] LibraryConfigError),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
impl From<LibraryManagerError> for rspc::Error {


@ -8,14 +8,12 @@ use crate::{
Node,
};
use futures::future::join_all;
use sd_core_sync::SyncMessage;
use sd_p2p::{Identity, RemoteIdentity};
use sd_prisma::prisma::{crdt_operation, instance, location, SortOrder};
use sd_prisma::prisma::{instance, location};
use sd_utils::{
db,
error::{FileIOError, NonUtf8PathError},
from_bytes_to_uuid,
};
use std::{
@ -32,7 +30,7 @@ use std::{
use chrono::Utc;
use futures_concurrency::future::{Join, TryJoin};
use tokio::{
fs, io,
fs, io, spawn,
sync::{broadcast, RwLock},
time::sleep,
};
@ -392,31 +390,31 @@ impl Libraries {
&self,
instance: &RemoteIdentity,
) -> Option<Arc<Library>> {
join_all(
self.libraries
.read()
.await
.iter()
.map(|(_, library)| async move {
library
.db
.instance()
.find_many(vec![instance::remote_identity::equals(
instance.get_bytes().to_vec(),
)])
.exec()
.await
.ok()
.iter()
.flatten()
.filter_map(|i| RemoteIdentity::from_bytes(&i.remote_identity).ok())
.any(|i| i == *instance)
.then(|| Arc::clone(library))
}),
)
.await
.into_iter()
.find_map(|v| v)
self.libraries
.read()
.await
.iter()
.map(|(_, library)| async move {
library
.db
.instance()
.find_many(vec![instance::remote_identity::equals(
instance.get_bytes().to_vec(),
)])
.exec()
.await
.ok()
.iter()
.flatten()
.filter_map(|i| RemoteIdentity::from_bytes(&i.remote_identity).ok())
.any(|i| i == *instance)
.then(|| Arc::clone(library))
})
.collect::<Vec<_>>()
.join()
.await
.into_iter()
.find_map(|v| v)
}
// get_ctx will return the library context for the given library id.
@ -529,38 +527,15 @@ impl Libraries {
let actors = Default::default();
let sync = sync::Manager::new(
&db,
let (sync, sync_rx) = sync::Manager::with_existing_instances(
Arc::clone(&db),
instance_id,
&config.generate_sync_operations,
{
db._batch(
instances
.iter()
.map(|i| {
db.crdt_operation()
.find_first(vec![crdt_operation::instance::is(vec![
instance::id::equals(i.id),
])])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
})
.collect::<Vec<_>>(),
)
.await?
.into_iter()
.zip(&instances)
.map(|(op, i)| {
(
from_bytes_to_uuid(&i.pub_id),
sd_sync::NTP64(op.map(|o| o.timestamp).unwrap_or_default() as u64),
)
})
.collect()
},
&actors,
Arc::clone(&config.generate_sync_operations),
&instances,
Arc::clone(&actors),
)
.await;
let sync_manager = Arc::new(sync.manager);
.await?;
let sync_manager = Arc::new(sync);
let cloud = crate::cloud::start(node, &actors, id, instance_id, &sync_manager, &db).await;
@ -581,7 +556,7 @@ impl Libraries {
.await;
// This is an exception. Generally subscribe to this by `self.tx.subscribe`.
tokio::spawn(sync_rx_actor(library.clone(), node.clone(), sync.rx));
spawn(sync_rx_actor(library.clone(), node.clone(), sync_rx));
self.tx
.emit(LibraryManagerEvent::Load(library.clone()))
@ -616,7 +591,7 @@ impl Libraries {
error!(?e, "Failed to resume jobs for library;");
}
tokio::spawn({
spawn({
let this = self.clone();
let node = node.clone();
let library = library.clone();


@ -78,6 +78,8 @@ pub enum LocationError {
MissingField(#[from] MissingFieldError),
#[error("invalid location scan state value: {0}")]
InvalidScanStateValue(i32),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
impl From<LocationError> for rspc::Error {


@ -94,6 +94,8 @@ pub enum LocationManagerError {
JobSystem(#[from] sd_core_heavy_lifting::Error),
#[error(transparent)]
FileIO(#[from] FileIOError),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}
type OnlineLocations = BTreeSet<Vec<u8>>;


@ -3,7 +3,7 @@ use crate::library::Library;
use sd_prisma::{prisma::tag, prisma_sync};
use sd_sync::*;
use chrono::{DateTime, FixedOffset, Utc};
use chrono::Utc;
use serde::Deserialize;
use specta::Type;
use uuid::Uuid;
@ -20,15 +20,14 @@ impl TagCreateArgs {
pub async fn exec(
self,
Library { db, sync, .. }: &Library,
) -> prisma_client_rust::Result<tag::Data> {
) -> Result<tag::Data, sd_core_sync::Error> {
let pub_id = Uuid::new_v4().as_bytes().to_vec();
let date_created: DateTime<FixedOffset> = Utc::now().into();
let (sync_params, db_params): (Vec<_>, Vec<_>) = [
sync_db_entry!(self.name, tag::name),
sync_db_entry!(self.color, tag::color),
sync_db_entry!(false, tag::is_hidden),
sync_db_entry!(date_created, tag::date_created),
sync_db_entry!(Utc::now(), tag::date_created),
]
.into_iter()
.unzip();


@ -4,7 +4,7 @@ use super::TagCreateArgs;
/// Seeds tags in a new library.
/// Shouldn't be called more than once!
pub async fn new_library(library: &Library) -> prisma_client_rust::Result<()> {
pub async fn new_library(library: &Library) -> Result<(), sd_core_sync::Error> {
// remove type after tags are added
let tags = [


@ -54,6 +54,8 @@ pub enum JobError {
Timeout(Duration),
#[error("critical job error: {0}")]
Critical(&'static str),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
// Specific job errors
#[error(transparent)]


@ -135,7 +135,10 @@ mod originator {
pub use responder::run as responder;
mod responder {
use std::pin::pin;
use super::*;
use futures::StreamExt;
use originator::tx as rx;
pub mod tx {
@ -196,30 +199,15 @@ mod responder {
stream: &mut (impl AsyncRead + AsyncWrite + Unpin),
library: Arc<Library>,
) -> Result<(), ()> {
let ingest = &library.sync.ingest;
async fn early_return(stream: &mut (impl AsyncRead + AsyncWrite + Unpin)) {
// TODO: Proper error returned to remote instead of this.
// TODO: We can't just abort the connection when the remote is expecting data.
stream
.write_all(&tx::MainRequest::Done.to_bytes())
.await
.unwrap();
stream.flush().await.unwrap();
}
let Ok(mut rx) = ingest.req_rx.try_lock() else {
warn!("Rejected sync due to libraries lock being held!");
early_return(stream).await;
return Ok(());
};
use sync::ingest::*;
let ingest = &library.sync.ingest;
ingest.event_tx.send(Event::Notification).await.unwrap();
while let Some(req) = rx.recv().await {
let mut rx = pin!(ingest.req_rx.clone());
while let Some(req) = rx.next().await {
const OPS_PER_REQUEST: u32 = 1000;
let timestamps = match req {
@ -245,6 +233,10 @@ mod responder {
let (wait_tx, wait_rx) = tokio::sync::oneshot::channel::<()>();
// FIXME: If the operation count is an exact multiple of OPS_PER_REQUEST,
// this misbehaves: we send `has_more` as true even though there are no
// more operations to send.
ingest
.event_tx
.send(Event::Messages(MessagesEvent {


@ -6,5 +6,8 @@ edition.workspace = true
repository.workspace = true
[dependencies]
async-channel = { workspace = true }
futures = { workspace = true }
pin-project-lite = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }


@ -1,34 +1,93 @@
use futures::Future;
use std::{collections::HashMap, pin::Pin, sync::Arc};
use tokio::{
sync::{broadcast, oneshot, Mutex},
task::AbortHandle,
#![warn(
clippy::all,
clippy::pedantic,
clippy::correctness,
clippy::perf,
clippy::style,
clippy::suspicious,
clippy::complexity,
clippy::nursery,
clippy::unwrap_used,
unused_qualifications,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unused_allocation,
clippy::unnecessary_cast,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::dbg_macro,
clippy::deprecated_cfg_attr,
clippy::separated_literal_suffix,
deprecated
)]
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
use std::{
collections::HashMap,
future::{Future, IntoFuture},
panic::{panic_any, AssertUnwindSafe},
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
task::{Context, Poll},
time::Duration,
};
use async_channel as chan;
use futures::FutureExt;
use tokio::{
spawn,
sync::{broadcast, RwLock},
task::JoinHandle,
time::timeout,
};
use tracing::{error, instrument, warn};
const ONE_MINUTE: Duration = Duration::from_secs(60);
type ActorFn = dyn Fn(Stopper) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync;
pub struct Actor {
pub abort_handle: Mutex<Option<AbortHandle>>,
pub spawn_fn: Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>,
spawn_fn: Arc<ActorFn>,
maybe_handle: Option<JoinHandle<()>>,
is_running: Arc<AtomicBool>,
stop_tx: chan::Sender<()>,
stop_rx: chan::Receiver<()>,
}
pub struct Actors {
pub invalidate_rx: broadcast::Receiver<()>,
invalidate_tx: broadcast::Sender<()>,
actors: Arc<Mutex<HashMap<String, Arc<Actor>>>>,
actors: Arc<RwLock<HashMap<&'static str, Actor>>>,
}
impl Actors {
pub async fn declare<F: Future<Output = ()> + Send + 'static>(
pub async fn declare<Fut>(
self: &Arc<Self>,
name: &str,
actor_fn: impl FnOnce() -> F + Send + Sync + Clone + 'static,
name: &'static str,
actor_fn: impl FnOnce(Stopper) -> Fut + Send + Sync + Clone + 'static,
autostart: bool,
) {
self.actors.lock().await.insert(
name.to_string(),
Arc::new(Actor {
abort_handle: Default::default(),
spawn_fn: Arc::new(move || Box::pin((actor_fn.clone())()) as Pin<Box<_>>),
}),
) where
Fut: Future<Output = ()> + Send + 'static,
{
let (stop_tx, stop_rx) = chan::bounded(1);
self.actors.write().await.insert(
name,
Actor {
spawn_fn: Arc::new(move |stop| Box::pin((actor_fn.clone())(stop))),
maybe_handle: None,
is_running: Arc::new(AtomicBool::new(false)),
stop_tx,
stop_rx,
},
);
if autostart {
@ -36,86 +95,162 @@ impl Actors {
}
}
#[instrument(skip(self))]
pub async fn start(self: &Arc<Self>, name: &str) {
let name = name.to_string();
let actors = self.actors.lock().await;
let Some(actor) = actors.get(&name).cloned() else {
return;
};
let mut abort_handle = actor.abort_handle.lock().await;
if abort_handle.is_some() {
return;
}
let (tx, rx) = oneshot::channel();
let invalidate_tx = self.invalidate_tx.clone();
let spawn_fn = actor.spawn_fn.clone();
let task = tokio::spawn(async move {
(spawn_fn)().await;
tx.send(()).ok();
});
*abort_handle = Some(task.abort_handle());
invalidate_tx.send(()).ok();
tokio::spawn({
let actor = actor.clone();
async move {
#[allow(clippy::match_single_binding)]
match rx.await {
_ => {}
};
actor.abort_handle.lock().await.take();
invalidate_tx.send(()).ok();
if let Some(actor) = self.actors.write().await.get_mut(name) {
if actor.is_running.load(Ordering::Acquire) {
warn!("Actor already running!");
return;
}
});
let invalidate_tx = self.invalidate_tx.clone();
let is_running = Arc::clone(&actor.is_running);
is_running.store(true, Ordering::Release);
if invalidate_tx.send(()).is_err() {
warn!("Failed to send invalidate signal");
}
if let Some(handle) = actor.maybe_handle.take() {
if handle.await.is_err() {
// This should never happen, as we're trying to catch the panic below with
// `catch_unwind`.
error!("Actor unexpectedly panicked");
}
}
actor.maybe_handle = Some(spawn({
let spawn_fn = Arc::clone(&actor.spawn_fn);
let stop_actor = Stopper(actor.stop_rx.clone());
async move {
if (AssertUnwindSafe((spawn_fn)(stop_actor)))
.catch_unwind()
.await
.is_err()
{
error!("Actor unexpectedly panicked");
}
is_running.store(false, Ordering::Release);
if invalidate_tx.send(()).is_err() {
warn!("Failed to send invalidate signal");
}
}
}));
}
}
#[instrument(skip(self))]
pub async fn stop(self: &Arc<Self>, name: &str) {
let name = name.to_string();
let actors = self.actors.lock().await;
if let Some(actor) = self.actors.write().await.get_mut(name) {
if !actor.is_running.load(Ordering::Acquire) {
warn!("Actor already stopped!");
return;
}
let Some(actor) = actors.get(&name).cloned() else {
return;
};
if actor.stop_tx.send(()).await.is_ok() {
wait_stop_or_abort(actor.maybe_handle.take()).await;
let mut abort_handle = actor.abort_handle.lock().await;
if let Some(abort_handle) = abort_handle.take() {
abort_handle.abort();
assert!(
!actor.is_running.load(Ordering::Acquire),
"actor handle finished without setting actor to stopped"
);
} else {
error!("Failed to send stop signal to actor, will check if it's already stopped or abort otherwise");
wait_stop_or_abort(actor.maybe_handle.take()).await;
}
}
}
pub async fn get_state(&self) -> HashMap<String, bool> {
let actors = self.actors.lock().await;
let mut state = HashMap::new();
for (name, actor) in &*actors {
state.insert(name.to_string(), actor.abort_handle.lock().await.is_some());
}
state
self.actors
.read()
.await
.iter()
.map(|(&name, actor)| (name.to_string(), actor.is_running.load(Ordering::Relaxed)))
.collect()
}
}
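// Illustrative usage sketch (not part of this diff): declaring, starting and
// stopping an actor. The actor body receives a `Stopper` it should observe.
//
//     let actors = Arc::new(Actors::default());
//     actors
//         .declare(
//             "example-actor",
//             |stop: Stopper| async move {
//                 while !stop.check_stop() {
//                     // ... one unit of work ...
//                 }
//             },
//             true, // autostart
//         )
//         .await;
//     actors.stop("example-actor").await;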
impl Default for Actors {
fn default() -> Self {
let actors = Default::default();
let (invalidate_tx, invalidate_rx) = broadcast::channel(1);
Self {
actors,
actors: Arc::default(),
invalidate_rx,
invalidate_tx,
}
}
}
pub struct Stopper(chan::Receiver<()>);
impl Stopper {
#[must_use]
pub fn check_stop(&self) -> bool {
self.0.try_recv().is_ok()
}
}
pin_project_lite::pin_project! {
pub struct StopActorFuture<'recv> {
#[pin]
fut: chan::Recv<'recv, ()>,
}
}
impl Future for StopActorFuture<'_> {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
match this.fut.poll(cx) {
Poll::Ready(res) => {
if res.is_err() {
warn!("StopActor channel closed, will stop actor");
}
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
}
}
impl<'recv> IntoFuture for &'recv Stopper {
type Output = ();
type IntoFuture = StopActorFuture<'recv>;
fn into_future(self) -> Self::IntoFuture {
Self::IntoFuture { fut: self.0.recv() }
}
}
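// How an actor observes shutdown (illustrative, not part of this diff): poll
// `check_stop` between units of work, as in the sketch above, or await the
// stopper directly, since `&Stopper` implements `IntoFuture<Output = ()>`,
// e.g. by racing `(&stop).await` against the next unit of work.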
async fn wait_stop_or_abort(maybe_handle: Option<JoinHandle<()>>) {
if let Some(handle) = maybe_handle {
let abort_handle = handle.abort_handle();
match timeout(ONE_MINUTE, handle).await {
Ok(Ok(())) => { /* Everything is Awesome! */ }
Ok(Err(e)) => {
// This should never happen, as we're trying to catch the panic with
// `catch_unwind`.
if e.is_panic() {
let p = e.into_panic();
error!("Actor unexpectedly panicked, we will pop up the panic!");
panic_any(p);
}
}
Err(_) => {
error!("Actor failed to gracefully stop in the allotted time, will force abortion");
abort_handle.abort();
}
}
}
}


@ -51,4 +51,6 @@ pub enum ImageLabelerError {
DownloadModel(#[from] DownloadModelError),
#[error(transparent)]
FileIO(#[from] FileIOError),
#[error(transparent)]
Sync(#[from] sd_core_sync::Error),
}


@ -30,7 +30,7 @@ pub const HEIF_EXTENSIONS: [&str; 8] = [
/// This is the target pixel count for all SVG images to be rendered at.
///
/// It is 512x512, but if the SVG has a non-1:1 aspect ratio we need to account for that.
pub const SVG_TARGET_PX: f32 = 262_144_f32;
pub const SVG_TARGET_PX: f32 = 262_144f32;
/// The size that PDF pages are rendered at.
///


@ -10,14 +10,21 @@
clippy::unwrap_used,
unused_qualifications,
rust_2018_idioms,
clippy::expect_used,
trivial_casts,
trivial_numeric_casts,
unused_allocation,
clippy::as_conversions,
clippy::dbg_macro
clippy::unnecessary_cast,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::dbg_macro,
clippy::deprecated_cfg_attr,
clippy::separated_literal_suffix,
deprecated
)]
#![forbid(unsafe_code)]
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
use std::{fs, path::Path};


@ -10,17 +10,19 @@ pub enum AttributeFieldValue<'a> {
#[allow(unused)]
impl AttributeFieldValue<'_> {
pub fn as_single(&self) -> Option<&str> {
match self {
AttributeFieldValue::Single(field) => Some(field),
_ => None,
pub const fn as_single(&self) -> Option<&str> {
if let AttributeFieldValue::Single(field) = self {
Some(field)
} else {
None
}
}
pub fn as_list(&self) -> Option<&Vec<&str>> {
match self {
AttributeFieldValue::List(fields) => Some(fields),
_ => None,
pub const fn as_list(&self) -> Option<&Vec<&str>> {
if let AttributeFieldValue::List(fields) = self {
Some(fields)
} else {
None
}
}
}
@ -36,12 +38,14 @@ impl<'a> Attribute<'a> {
parser::parse(input).map(|(_, a)| a).map_err(|_| ())
}
pub fn field(&self, name: &str) -> Option<&AttributeFieldValue> {
self.fields.iter().find(|(n, _)| *n == name).map(|(_, v)| v)
pub fn field(&self, name: &str) -> Option<&AttributeFieldValue<'_>> {
self.fields
.iter()
.find_map(|(n, v)| (*n == name).then_some(v))
}
}
pub fn model_attributes(model: ModelWalker) -> Vec<Attribute> {
pub fn model_attributes(model: ModelWalker<'_>) -> Vec<Attribute<'_>> {
model
.ast_model()
.documentation()


@ -1,11 +1,11 @@
use nom::{
branch::alt,
bytes::complete::*,
character::complete::*,
combinator::*,
bytes::complete::{is_not, tag},
character::complete::{alpha1, char, multispace0},
combinator::{map, opt},
error::{ErrorKind, ParseError},
multi::*,
sequence::*,
multi::separated_list1,
sequence::{delimited, separated_pair},
AsChar, IResult, InputTakeAtPosition,
};
@ -24,7 +24,7 @@ fn parens(input: &str) -> IResult<&str, &str> {
delimited(char('('), is_not(")"), char(')'))(input)
}
fn single_value<T, E: ParseError<T>>(i: T) -> IResult<T, T, E>
fn single_value<T, E: ParseError<T>>(i: &T) -> IResult<T, T, E>
where
T: InputTakeAtPosition,
<T as InputTakeAtPosition>::Item: AsChar,
@ -41,19 +41,19 @@ where
fn list_value(input: &str) -> IResult<&str, Vec<&str>> {
delimited(
char('['),
separated_list1(char(','), remove_ws(single_value)),
separated_list1(char(','), remove_ws(|a| single_value(&a))),
char(']'),
)(input)
}
fn attribute_field_value(input: &str) -> IResult<&str, AttributeFieldValue> {
fn attribute_field_value(input: &str) -> IResult<&str, AttributeFieldValue<'_>> {
remove_ws(alt((
map(list_value, AttributeFieldValue::List),
map(single_value, AttributeFieldValue::Single),
map(|a| list_value(a), AttributeFieldValue::List),
map(|a| single_value(&a), AttributeFieldValue::Single),
)))(input)
}
fn attribute_field(input: &str) -> IResult<&str, (&str, AttributeFieldValue)> {
fn attribute_field(input: &str) -> IResult<&str, (&str, AttributeFieldValue<'_>)> {
remove_ws(separated_pair(
remove_ws(is_not(":")),
char(':'),
@ -61,11 +61,11 @@ fn attribute_field(input: &str) -> IResult<&str, (&str, AttributeFieldValue)> {
))(input)
}
fn attribute_fields(input: &str) -> IResult<&str, Vec<(&str, AttributeFieldValue)>> {
fn attribute_fields(input: &str) -> IResult<&str, Vec<(&str, AttributeFieldValue<'_>)>> {
separated_list1(char(','), attribute_field)(input)
}
pub fn parse(input: &str) -> IResult<&str, Attribute> {
pub fn parse(input: &str) -> IResult<&str, Attribute<'_>> {
let (input, _) = remove_ws(tag("@"))(input)?;
let (input, name) = alpha1(input)?;
let (input, values_str) = opt(remove_ws(parens))(input)?;
@ -86,7 +86,7 @@ mod test {
fn marker() {
let s = "@local";
let (remaining, attribute) = super::parse(s).unwrap();
let (remaining, attribute) = parse(s).unwrap();
assert_eq!(remaining, "");
assert_eq!(attribute.name, "local");
@ -97,7 +97,7 @@ mod test {
fn single() {
let s = "@local(foo: bar)";
let (remaining, attribute) = super::parse(s).unwrap();
let (remaining, attribute) = parse(s).unwrap();
assert_eq!(remaining, "");
assert_eq!(attribute.name, "local");
@ -113,7 +113,7 @@ mod test {
fn list() {
let s = "@local(foo: [bar, baz])";
let (remaining, attribute) = match super::parse(s) {
let (remaining, attribute) = match parse(s) {
Ok(v) => v,
Err(e) => panic!("{}", e),
};
@ -136,7 +136,7 @@ mod test {
fn multiple() {
let s = "@local(foo: bar, baz: qux)";
let (remaining, attribute) = super::parse(s).unwrap();
let (remaining, attribute) = parse(s).unwrap();
assert_eq!(remaining, "");
assert_eq!(attribute.name, "local");


@ -1,8 +1,31 @@
mod attribute;
mod model;
mod sync_data;
use attribute::*;
#![warn(
clippy::all,
clippy::pedantic,
clippy::correctness,
clippy::perf,
clippy::style,
clippy::suspicious,
clippy::complexity,
clippy::nursery,
clippy::unwrap_used,
unused_qualifications,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unused_allocation,
clippy::unnecessary_cast,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::dbg_macro,
clippy::deprecated_cfg_attr,
clippy::separated_literal_suffix,
deprecated
)]
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
use prisma_client_rust_sdk::{
prelude::*,
@ -11,6 +34,12 @@ use prisma_client_rust_sdk::{
},
};
mod attribute;
mod model;
mod sync_data;
use attribute::{model_attributes, Attribute, AttributeFieldValue};
#[derive(Debug, serde::Serialize, thiserror::Error)]
enum Error {}
@ -38,7 +67,7 @@ pub enum ModelSyncType<'a> {
}
impl<'a> ModelSyncType<'a> {
fn from_attribute(attr: Attribute, model: ModelWalker<'a>) -> Option<Self> {
fn from_attribute(attr: &Attribute<'_>, model: ModelWalker<'a>) -> Option<Self> {
Some(match attr.name {
"local" | "shared" => {
let id = attr
@ -69,14 +98,15 @@ impl<'a> ModelSyncType<'a> {
AttributeFieldValue::List(_) => None,
})
.and_then(|name| {
match model
if let RefinedFieldWalker::Relation(r) = model
.fields()
.find(|f| f.name() == name)
.unwrap_or_else(|| panic!("'{name}' field not found"))
.refine()
{
RefinedFieldWalker::Relation(r) => Some(r),
_ => None,
Some(r)
} else {
None
}
})
.unwrap_or_else(|| panic!("'{name}' must be a relation field"))
@ -96,11 +126,10 @@ impl<'a> ModelSyncType<'a> {
})
}
fn sync_id(&self) -> Vec<FieldWalker> {
fn sync_id(&self) -> Vec<FieldWalker<'_>> {
match self {
// Self::Owned { id } => id.clone(),
Self::Local { id, .. } => vec![*id],
Self::Shared { id, .. } => vec![*id],
Self::Local { id, .. } | Self::Shared { id, .. } => vec![*id],
Self::Relation { group, item, .. } => vec![(*group).into(), (*item).into()],
}
}
@ -127,7 +156,7 @@ impl PrismaGenerator for SDSyncGenerator {
type Error = Error;
fn generate(self, args: GenerateArgs) -> Result<Module, Self::Error> {
fn generate(self, args: GenerateArgs<'_>) -> Result<Module, Self::Error> {
let db = &args.schema.db;
let models_with_sync_types = db
@ -136,13 +165,13 @@ impl PrismaGenerator for SDSyncGenerator {
.map(|(model, attributes)| {
let sync_type = attributes
.into_iter()
.find_map(|a| ModelSyncType::from_attribute(a, model));
.find_map(|a| ModelSyncType::from_attribute(&a, model));
(model, sync_type)
})
.collect::<Vec<_>>();
let model_sync_data = sync_data::r#enum(models_with_sync_types.clone());
let model_sync_data = sync_data::enumerate(&models_with_sync_types);
let mut module = Module::new(
"root",


@ -1,13 +1,14 @@
use prisma_client_rust_sdk::{prelude::*, prisma::prisma_models::walkers::RefinedFieldWalker};
use prisma_models::{ast::ModelId, walkers::Walker};
use crate::{ModelSyncType, ModelWithSyncType};
pub fn module((model, sync_type): ModelWithSyncType) -> Module {
pub fn module((model, sync_type): ModelWithSyncType<'_>) -> Module {
let model_name_snake = snake_ident(model.name());
let sync_id = sync_type.as_ref().map(|sync_type| {
let fields = sync_type.sync_id();
let fields = fields.iter().flat_map(|field| {
let fields = fields.iter().map(|field| {
let name_snake = snake_ident(field.name());
let typ = match field.refine() {
@ -18,58 +19,10 @@ pub fn module((model, sync_type): ModelWithSyncType) -> Module {
}
};
Some(quote!(pub #name_snake: #typ))
quote!(pub #name_snake: #typ)
});
let model_stuff = match sync_type {
ModelSyncType::Relation {
item,
group,
model_id,
} => {
let item_name_snake = snake_ident(item.name());
let item_model_name_snake = snake_ident(item.related_model().name());
let group_name_snake = snake_ident(group.name());
let group_model_name_snake = snake_ident(group.related_model().name());
Some(quote! {
impl sd_sync::RelationSyncId for SyncId {
type ItemSyncId = super::#item_model_name_snake::SyncId;
type GroupSyncId = super::#group_model_name_snake::SyncId;
fn split(&self) -> (&Self::ItemSyncId, &Self::GroupSyncId) {
(
&self.#item_name_snake,
&self.#group_name_snake
)
}
}
pub const MODEL_ID: u16 = #model_id;
impl sd_sync::SyncModel for #model_name_snake::Types {
const MODEL_ID: u16 = MODEL_ID;
}
impl sd_sync::RelationSyncModel for #model_name_snake::Types {
type SyncId = SyncId;
}
})
}
ModelSyncType::Shared { model_id, .. } => Some(quote! {
pub const MODEL_ID: u16 = #model_id;
impl sd_sync::SyncModel for #model_name_snake::Types {
const MODEL_ID: u16 = MODEL_ID;
}
impl sd_sync::SharedSyncModel for #model_name_snake::Types {
type SyncId = SyncId;
}
}),
_ => None,
};
let model_stuff = parse_model(sync_type, &model_name_snake);
quote! {
#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
@ -101,8 +54,9 @@ pub fn module((model, sync_type): ModelWithSyncType) -> Module {
let relation_model_name_snake =
snake_ident(relation_field.related_model().name());
match relation_field.referenced_fields() {
Some(i) => {
relation_field.referenced_fields().map_or_else(
|| None,
|i| {
if i.count() == 1 {
Some(quote! {{
let val: std::collections::HashMap<String, rmpv::Value> = ::rmpv::ext::from_value(val).unwrap();
@ -115,17 +69,17 @@ pub fn module((model, sync_type): ModelWithSyncType) -> Module {
} else {
None
}
}
_ => None,
}
},
)
}
}
.map(|body| quote!(#model_name_snake::#field_name_snake::NAME => #body))
});
match field_matches.clone().count() {
0 => quote!(),
_ => quote! {
if field_matches.clone().count() == 0 {
quote!()
} else {
quote! {
impl #model_name_snake::SetParam {
pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option<Self> {
Some(match field {
@ -134,41 +88,11 @@ pub fn module((model, sync_type): ModelWithSyncType) -> Module {
})
}
}
},
}
}
};
let unique_param_impl = {
let field_matches = model
.unique_criterias()
.flat_map(|criteria| match &criteria.fields().next() {
Some(field) if criteria.fields().len() == 1 => {
let field_name_snake = snake_ident(field.name());
Some(quote!(#model_name_snake::#field_name_snake::NAME =>
#model_name_snake::#field_name_snake::equals(
::rmpv::ext::from_value(val).unwrap()
),
))
}
_ => None,
})
.collect::<Vec<_>>();
match field_matches.len() {
0 => quote!(),
_ => quote! {
impl #model_name_snake::UniqueWhereParam {
pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option<Self> {
Some(match field {
#(#field_matches)*
_ => return None
})
}
}
},
}
};
let unique_param_impl = process_unique_params(model, &model_name_snake);
Module::new(
model.name(),
@ -184,3 +108,90 @@ pub fn module((model, sync_type): ModelWithSyncType) -> Module {
},
)
}
#[inline]
fn parse_model(sync_type: &ModelSyncType<'_>, model_name_snake: &Ident) -> Option<TokenStream> {
match sync_type {
ModelSyncType::Relation {
item,
group,
model_id,
} => {
let item_name_snake = snake_ident(item.name());
let item_model_name_snake = snake_ident(item.related_model().name());
let group_name_snake = snake_ident(group.name());
let group_model_name_snake = snake_ident(group.related_model().name());
Some(quote! {
impl sd_sync::RelationSyncId for SyncId {
type ItemSyncId = super::#item_model_name_snake::SyncId;
type GroupSyncId = super::#group_model_name_snake::SyncId;
fn split(&self) -> (&Self::ItemSyncId, &Self::GroupSyncId) {
(
&self.#item_name_snake,
&self.#group_name_snake
)
}
}
pub const MODEL_ID: u16 = #model_id;
impl sd_sync::SyncModel for #model_name_snake::Types {
const MODEL_ID: u16 = MODEL_ID;
}
impl sd_sync::RelationSyncModel for #model_name_snake::Types {
type SyncId = SyncId;
}
})
}
ModelSyncType::Shared { model_id, .. } => Some(quote! {
pub const MODEL_ID: u16 = #model_id;
impl sd_sync::SyncModel for #model_name_snake::Types {
const MODEL_ID: u16 = MODEL_ID;
}
impl sd_sync::SharedSyncModel for #model_name_snake::Types {
type SyncId = SyncId;
}
}),
ModelSyncType::Local { .. } => None,
}
}
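For reference, a sketch of what `parse_model` emits for a shared model, assuming a hypothetical `tag` model registered with `model_id = 5` (illustrative names only, not taken from the real schema):
pub const MODEL_ID: u16 = 5; // hypothetical expansion of the Shared arm above
impl sd_sync::SyncModel for tag::Types {
    const MODEL_ID: u16 = MODEL_ID;
}
impl sd_sync::SharedSyncModel for tag::Types {
    type SyncId = SyncId;
}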
#[inline]
fn process_unique_params(model: Walker<'_, ModelId>, model_name_snake: &Ident) -> TokenStream {
let field_matches = model
.unique_criterias()
.filter_map(|criteria| match &criteria.fields().next() {
Some(field) if criteria.fields().len() == 1 => {
let field_name_snake = snake_ident(field.name());
Some(quote!(#model_name_snake::#field_name_snake::NAME =>
#model_name_snake::#field_name_snake::equals(
::rmpv::ext::from_value(val).unwrap()
),
))
}
_ => None,
})
.collect::<Vec<_>>();
if field_matches.is_empty() {
quote!()
} else {
quote! {
impl #model_name_snake::UniqueWhereParam {
pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option<Self> {
Some(match field {
#(#field_matches)*
_ => return None
})
}
}
}
}
}
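The generated `deserialize` gives the sync engine a way to turn a `(field, value)` pair arriving off the wire back into a typed unique filter. A sketch of the expansion, assuming a hypothetical model whose only single-field unique criterion is `pub_id`:
// Hypothetical expansion of `process_unique_params` for a `tag` model.
impl tag::UniqueWhereParam {
    pub fn deserialize(field: &str, val: ::rmpv::Value) -> Option<Self> {
        Some(match field {
            tag::pub_id::NAME => tag::pub_id::equals(
                ::rmpv::ext::from_value(val).unwrap()
            ),
            _ => return None
        })
    }
}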

View file

@@ -2,10 +2,11 @@ use prisma_client_rust_sdk::{
prelude::*,
prisma::prisma_models::walkers::{RefinedFieldWalker, RelationFieldWalker},
};
use prisma_models::walkers::{FieldWalker, ScalarFieldWalker};
use crate::{ModelSyncType, ModelWithSyncType};
pub fn r#enum(models: Vec<ModelWithSyncType>) -> TokenStream {
pub fn enumerate(models: &[ModelWithSyncType<'_>]) -> TokenStream {
let (variants, matches): (Vec<_>, Vec<_>) = models
.iter()
.filter_map(|(model, sync_type)| {
@@ -38,193 +39,12 @@ pub fn r#enum(models: Vec<ModelWithSyncType>) -> TokenStream {
let match_arms = match sync_type.as_ref()? {
ModelSyncType::Shared { id, model_id } => {
let (get_id, equals_value, id_name_snake, create_id) = match id.refine() {
RefinedFieldWalker::Relation(rel) => {
let scalar_field = rel.fields().unwrap().next().unwrap();
let id_name_snake = snake_ident(scalar_field.name());
let field_name_snake = snake_ident(rel.name());
let opposite_model_name_snake =
snake_ident(rel.opposite_relation_field().unwrap().model().name());
let relation_equals_condition = quote!(prisma::#opposite_model_name_snake::pub_id::equals(
id.#field_name_snake.pub_id.clone()
));
let rel_fetch = quote! {
let rel = db.#opposite_model_name_snake()
.find_unique(#relation_equals_condition)
.exec()
.await?
.unwrap();
};
(
Some(rel_fetch),
quote!(rel.id),
id_name_snake,
relation_equals_condition,
)
}
RefinedFieldWalker::Scalar(s) => {
let field_name_snake = snake_ident(s.name());
let thing = quote!(id.#field_name_snake.clone());
(None, thing.clone(), field_name_snake, thing)
}
};
quote! {
#get_id
match data {
sd_sync::CRDTOperationData::Create(data) => {
let data: Vec<_> = data.into_iter().map(|(field, value)| {
prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()
}).collect();
db.#model_name_snake()
.upsert(
prisma::#model_name_snake::#id_name_snake::equals(#equals_value),
prisma::#model_name_snake::create(#create_id, data.clone()),
data
)
.exec()
.await?;
},
sd_sync::CRDTOperationData::Update { field, value } => {
let data = vec![
prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()
];
db.#model_name_snake()
.upsert(
prisma::#model_name_snake::#id_name_snake::equals(#equals_value),
prisma::#model_name_snake::create(#create_id, data.clone()),
data,
)
.exec()
.await?;
},
sd_sync::CRDTOperationData::Delete => {
db.#model_name_snake()
.delete(prisma::#model_name_snake::#id_name_snake::equals(#equals_value))
.exec()
.await?;
db.crdt_operation()
.delete_many(vec![
prisma::crdt_operation::model::equals(#model_id as i32),
prisma::crdt_operation::record_id::equals(rmp_serde::to_vec(&id).unwrap()),
prisma::crdt_operation::kind::equals(sd_sync::OperationKind::Create.to_string())
])
.exec()
.await?;
},
}
}
handle_crdt_ops_shared(id, *model_id, &model_name_snake)
}
ModelSyncType::Relation { item, group, .. } => {
let compound_id = format_ident!(
"{}",
group
.fields()
.unwrap()
.chain(item.fields().unwrap())
.map(|f| f.name())
.collect::<Vec<_>>()
.join("_")
);
let db_batch_items = {
let batch_item = |item: &RelationFieldWalker| {
let item_model_sync_id_field_name_snake = models
.iter()
.find(|m| m.0.name() == item.related_model().name())
.and_then(|(_m, sync)| sync.as_ref())
.map(|sync| snake_ident(sync.sync_id()[0].name()))
.unwrap();
let item_model_name_snake = snake_ident(item.related_model().name());
let item_field_name_snake = snake_ident(item.name());
quote! {
db.#item_model_name_snake()
.find_unique(
prisma::#item_model_name_snake::#item_model_sync_id_field_name_snake::equals(
id.#item_field_name_snake.#item_model_sync_id_field_name_snake.clone()
)
)
.select(prisma::#item_model_name_snake::select!({ id }))
}
};
[batch_item(group), batch_item(item)]
};
let create_items = {
let create_item = |item: &RelationFieldWalker, var: TokenStream| {
let item_model_name_snake = snake_ident(item.related_model().name());
quote!(
prisma::#item_model_name_snake::id::equals(#var.id)
)
};
[
create_item(item, quote!(item)),
create_item(group, quote!(group)),
]
};
quote! {
let (Some(group), Some(item)) =
(#(#db_batch_items.exec().await?),*) else {
panic!("item and group not found!");
};
let id = prisma::#model_name_snake::#compound_id(group.id, item.id);
match data {
sd_sync::CRDTOperationData::Create(_) => {
db.#model_name_snake()
.upsert(
id,
prisma::#model_name_snake::create(
#(#create_items),*,
vec![]
),
vec![],
)
.exec()
.await
.ok();
},
sd_sync::CRDTOperationData::Update { field, value } => {
let data = vec![prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()];
db.#model_name_snake()
.upsert(
id,
prisma::#model_name_snake::create(
#(#create_items),*,
data.clone(),
),
data,
)
.exec()
.await
.ok();
},
sd_sync::CRDTOperationData::Delete => {
db.#model_name_snake()
.delete(id)
.exec()
.await
.ok();
},
}
}
handle_crdt_ops_relation(models, item, group, &model_name_snake)
}
_ => return None,
ModelSyncType::Local { .. } => return None,
};
Some(quote! {
@@ -257,3 +77,210 @@ pub fn r#enum(models: Vec<ModelWithSyncType>) -> TokenStream {
}
}
}
fn handle_crdt_ops_relation(
models: &[ModelWithSyncType<'_>],
item: &RelationFieldWalker<'_>,
group: &RelationFieldWalker<'_>,
model_name_snake: &Ident,
) -> TokenStream {
let compound_id = format_ident!(
"{}",
group
.fields()
.expect("missing group fields")
.chain(item.fields().expect("missing item fields"))
.map(ScalarFieldWalker::name)
.collect::<Vec<_>>()
.join("_")
);
let db_batch_items = {
let batch_item = |item: &RelationFieldWalker<'_>| {
let item_model_sync_id_field_name_snake = models
.iter()
.find(|m| m.0.name() == item.related_model().name())
.and_then(|(_m, sync)| sync.as_ref())
.map(|sync| snake_ident(sync.sync_id()[0].name()))
.expect("missing sync id field name for relation");
let item_model_name_snake = snake_ident(item.related_model().name());
let item_field_name_snake = snake_ident(item.name());
quote! {
db.#item_model_name_snake()
.find_unique(
prisma::#item_model_name_snake::#item_model_sync_id_field_name_snake::equals(
id.#item_field_name_snake.#item_model_sync_id_field_name_snake.clone()
)
)
.select(prisma::#item_model_name_snake::select!({ id }))
}
};
[batch_item(group), batch_item(item)]
};
let create_items = {
let create_item = |item: &RelationFieldWalker<'_>, var: TokenStream| {
let item_model_name_snake = snake_ident(item.related_model().name());
quote!(
prisma::#item_model_name_snake::id::equals(#var.id)
)
};
[
create_item(item, quote!(item)),
create_item(group, quote!(group)),
]
};
quote! {
let (Some(group), Some(item)) =
(#(#db_batch_items.exec().await?),*) else {
panic!("item and group not found!");
};
let id = prisma::#model_name_snake::#compound_id(group.id, item.id);
match data {
sd_sync::CRDTOperationData::Create(_) => {
db.#model_name_snake()
.upsert(
id,
prisma::#model_name_snake::create(
#(#create_items),*,
vec![]
),
vec![],
)
.exec()
.await
.ok();
},
sd_sync::CRDTOperationData::Update { field, value } => {
let data = vec![prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()];
db.#model_name_snake()
.upsert(
id,
prisma::#model_name_snake::create(
#(#create_items),*,
data.clone(),
),
data,
)
.exec()
.await
.ok();
},
sd_sync::CRDTOperationData::Delete => {
db.#model_name_snake()
.delete(id)
.exec()
.await
.ok();
},
}
}
}
#[inline]
fn handle_crdt_ops_shared(
id: &FieldWalker<'_>,
model_id: u16,
model_name_snake: &Ident,
) -> TokenStream {
let (get_id, equals_value, id_name_snake, create_id) = match id.refine() {
RefinedFieldWalker::Relation(rel) => {
let scalar_field = rel
.fields()
.expect("missing fields")
.next()
.expect("empty fields");
let id_name_snake = snake_ident(scalar_field.name());
let field_name_snake = snake_ident(rel.name());
let opposite_model_name_snake = snake_ident(
rel.opposite_relation_field()
.expect("missing opposite relation field")
.model()
.name(),
);
let relation_equals_condition = quote!(prisma::#opposite_model_name_snake::pub_id::equals(
id.#field_name_snake.pub_id.clone()
));
let rel_fetch = quote! {
let rel = db.#opposite_model_name_snake()
.find_unique(#relation_equals_condition)
.exec()
.await?
.unwrap();
};
(
Some(rel_fetch),
quote!(rel.id),
id_name_snake,
relation_equals_condition,
)
}
RefinedFieldWalker::Scalar(s) => {
let field_name_snake = snake_ident(s.name());
let thing = quote!(id.#field_name_snake.clone());
(None, thing.clone(), field_name_snake, thing)
}
};
quote! {
#get_id
match data {
sd_sync::CRDTOperationData::Create(data) => {
let data: Vec<_> = data.into_iter().map(|(field, value)| {
prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()
}).collect();
db.#model_name_snake()
.upsert(
prisma::#model_name_snake::#id_name_snake::equals(#equals_value),
prisma::#model_name_snake::create(#create_id, data.clone()),
data
)
.exec()
.await?;
},
sd_sync::CRDTOperationData::Update { field, value } => {
let data = vec![
prisma::#model_name_snake::SetParam::deserialize(&field, value).unwrap()
];
db.#model_name_snake()
.upsert(
prisma::#model_name_snake::#id_name_snake::equals(#equals_value),
prisma::#model_name_snake::create(#create_id, data.clone()),
data,
)
.exec()
.await?;
},
sd_sync::CRDTOperationData::Delete => {
db.#model_name_snake()
.delete(prisma::#model_name_snake::#id_name_snake::equals(#equals_value))
.exec()
.await?;
db.crdt_operation()
.delete_many(vec![
prisma::crdt_operation::model::equals(#model_id as i32),
prisma::crdt_operation::record_id::equals(rmp_serde::to_vec(&id).unwrap()),
prisma::crdt_operation::kind::equals(sd_sync::OperationKind::Create.to_string())
])
.exec()
.await?;
},
}
}
}

View file

@@ -1,3 +1,5 @@
use std::mem;
use serde::{Deserialize, Serialize};
use uhlc::NTP64;
use uuid::Uuid;
@@ -6,11 +8,12 @@ use crate::{CRDTOperation, CRDTOperationData};
pub type CompressedCRDTOperationsForModel = Vec<(rmpv::Value, Vec<CompressedCRDTOperation>)>;
/// Stores a bunch of CRDTOperations in a more memory-efficient form for sending to the cloud.
/// Stores a bunch of [`CRDTOperation`]s in a more memory-efficient form for sending to the cloud.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct CompressedCRDTOperations(pub Vec<(Uuid, Vec<(u16, CompressedCRDTOperationsForModel)>)>);
impl CompressedCRDTOperations {
#[must_use]
pub fn new(ops: Vec<CRDTOperation>) -> Self {
let mut compressed = vec![];
@@ -32,34 +35,34 @@ impl CompressedCRDTOperations {
for op in ops_iter {
if instance_id != op.instance {
model.push((
std::mem::replace(&mut record_id, op.record_id.clone()),
std::mem::take(&mut record),
mem::replace(&mut record_id, op.record_id.clone()),
mem::take(&mut record),
));
instance.push((
std::mem::replace(&mut model_str, op.model),
std::mem::take(&mut model),
mem::replace(&mut model_str, op.model),
mem::take(&mut model),
));
compressed.push((
std::mem::replace(&mut instance_id, op.instance),
std::mem::take(&mut instance),
mem::replace(&mut instance_id, op.instance),
mem::take(&mut instance),
));
} else if model_str != op.model {
model.push((
std::mem::replace(&mut record_id, op.record_id.clone()),
std::mem::take(&mut record),
mem::replace(&mut record_id, op.record_id.clone()),
mem::take(&mut record),
));
instance.push((
std::mem::replace(&mut model_str, op.model),
std::mem::take(&mut model),
mem::replace(&mut model_str, op.model),
mem::take(&mut model),
));
} else if record_id != op.record_id {
model.push((
std::mem::replace(&mut record_id, op.record_id.clone()),
std::mem::take(&mut record),
mem::replace(&mut record_id, op.record_id.clone()),
mem::take(&mut record),
));
}
record.push(CompressedCRDTOperation::from(op))
record.push(CompressedCRDTOperation::from(op));
}
model.push((record_id, record));
@@ -69,6 +72,7 @@ impl CompressedCRDTOperations {
Self(compressed)
}
#[must_use]
pub fn first(&self) -> Option<(Uuid, u16, &rmpv::Value, &CompressedCRDTOperation)> {
self.0.first().and_then(|(instance, data)| {
data.first().and_then(|(model, data)| {
@@ -78,6 +82,7 @@ impl CompressedCRDTOperations {
})
}
#[must_use]
pub fn last(&self) -> Option<(Uuid, u16, &rmpv::Value, &CompressedCRDTOperation)> {
self.0.last().and_then(|(instance, data)| {
data.last().and_then(|(model, data)| {
@@ -104,6 +109,7 @@ impl CompressedCRDTOperations {
self.len() == 0
}
#[must_use]
pub fn into_ops(self) -> Vec<CRDTOperation> {
let mut ops = vec![];
@@ -117,7 +123,7 @@ impl CompressedCRDTOperations {
record_id: record_id.clone(),
timestamp: op.timestamp,
data: op.data,
})
});
}
}
}
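The nesting above is where the memory savings come from: operations are grouped instance → model → record, so each shared prefix is stored once per run rather than once per operation. A round-trip sketch, assuming `op_a` and `op_b` are `CRDTOperation` values for the same record on the same instance (and that `CRDTOperation` is `Clone + PartialEq` for the sake of the assertions):
// `new` compresses, `into_ops` re-expands; order is preserved.
let ops = vec![op_a, op_b];
let compressed = CompressedCRDTOperations::new(ops.clone());
assert_eq!(compressed.len(), 2);
assert_eq!(compressed.into_ops(), ops);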

View file

@@ -1,4 +1,4 @@
use std::{collections::BTreeMap, fmt::Debug};
use std::{collections::BTreeMap, fmt};
use serde::{Deserialize, Serialize};
use specta::Type;
@@ -11,8 +8,8 @@ pub enum OperationKind<'a> {
Delete,
}
impl std::fmt::Display for OperationKind<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl fmt::Display for OperationKind<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
OperationKind::Create => write!(f, "c"),
OperationKind::Update(field) => write!(f, "u:{field}"),
@@ -36,11 +36,13 @@ pub enum CRDTOperationData {
}
impl CRDTOperationData {
#[must_use]
pub fn create() -> Self {
Self::Create(Default::default())
Self::Create(BTreeMap::default())
}
pub fn as_kind(&self) -> OperationKind {
#[must_use]
pub fn as_kind(&self) -> OperationKind<'_> {
match self {
Self::Create(_) => OperationKind::Create,
Self::Update { field, .. } => OperationKind::Update(field),
@@ -62,17 +64,17 @@ pub struct CRDTOperation {
impl CRDTOperation {
#[must_use]
pub fn kind(&self) -> OperationKind {
pub fn kind(&self) -> OperationKind<'_> {
self.data.as_kind()
}
}
impl Debug for CRDTOperation {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
impl fmt::Debug for CRDTOperation {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("CRDTOperation")
.field("data", &self.data)
.field("model", &self.model)
.field("record_id", &self.record_id.to_string())
.finish()
.finish_non_exhaustive()
}
}
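These display strings double as the compact `kind` values stored with each operation (see the `crdt_operation::kind::equals(...)` filter in the generator above). A quick sketch of the mapping:
// Create -> "c", Update -> "u:<field>", Delete -> "d".
assert_eq!(CRDTOperationData::create().as_kind().to_string(), "c");
assert_eq!(OperationKind::Update("name").to_string(), "u:name");
assert_eq!(OperationKind::Delete.to_string(), "d");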

View file

@@ -126,7 +126,7 @@ macro_rules! option_sync_entry {
#[macro_export]
macro_rules! sync_db_entry {
($v:expr, $($m:tt)*) => {{
let v = $v;
let v = $v.into();
($crate::sync_entry!(&v, $($m)*), $($m)*::set(Some(v)))
}}
}
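With the added `.into()`, the macro converts the value before building the `(sync entry, set param)` pair, so callers can pass anything convertible into the column's type. A hypothetical usage, where `file_path::name` stands in for a real generated Prisma module:
// A `&str` now satisfies a `String` column thanks to the `.into()` above.
let (sync_param, db_param) = sync_db_entry!("document.pdf", file_path::name);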

View file

@@ -1,3 +1,32 @@
#![warn(
clippy::all,
clippy::pedantic,
clippy::correctness,
clippy::perf,
clippy::style,
clippy::suspicious,
clippy::complexity,
clippy::nursery,
clippy::unwrap_used,
unused_qualifications,
rust_2018_idioms,
trivial_casts,
trivial_numeric_casts,
unused_allocation,
clippy::unnecessary_cast,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::dbg_macro,
clippy::deprecated_cfg_attr,
clippy::separated_literal_suffix,
deprecated
)]
#![forbid(deprecated_in_future)]
#![allow(clippy::missing_errors_doc, clippy::module_name_repetitions)]
mod compressed;
mod crdt;
mod factory;

View file

@@ -13,6 +13,7 @@ async-channel = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
futures-concurrency = { workspace = true }
pin-project-lite = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = [
"sync",
@@ -26,7 +27,6 @@ uuid = { workspace = true, features = ["v4"] }
# Specific Task System dependencies
downcast-rs = "1.2"
pin-project = "1.1"
[dev-dependencies]
lending-stream = { workspace = true }

View file

@@ -188,12 +188,13 @@ where
) -> impl Future<Output = Result<Self, Self::DeserializeError>> + Send;
}
/// Intermediate struct to wait until a pause or a cancel command is sent by the user.
#[must_use = "`InterrupterFuture` does nothing unless polled"]
#[pin_project::pin_project]
pub struct InterrupterFuture<'recv> {
#[pin]
fut: chan::Recv<'recv, InterruptionRequest>,
pin_project_lite::pin_project! {
/// Intermediate struct to wait until a pause or a cancel command is sent by the user.
#[must_use = "`InterrupterFuture` does nothing unless polled"]
pub struct InterrupterFuture<'recv> {
#[pin]
fut: chan::Recv<'recv, InterruptionRequest>,
}
}
impl Future for InterrupterFuture<'_> {
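Swapping `pin-project` for `pin-project-lite` keeps the same structural pinning while dropping a proc-macro dependency, and the generated `project()` is used identically when polling. A minimal sketch of how the truncated impl above might continue; the `InterruptionKind` output type and `kind` field are assumptions, not taken from this diff:
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `project()` is generated by the pin_project_lite::pin_project! block.
        let this = self.project();
        this.fut.poll(cx).map(|res| match res {
            Ok(req) => req.kind,                // hypothetical field
            Err(_) => InterruptionKind::Cancel, // hypothetical: channel closed
        })
    }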

View file

@@ -338,7 +338,17 @@ impl Task<SampleError> for SampleActorTask {
async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, SampleError> {
info!("Actor data: {:#?}", self.actor_data);
self.timed_task.run(interrupter).await
let out = self.timed_task.run(interrupter).await?;
if let ExecStatus::Done(TaskOutput::Out(out)) = &out {
info!(
"Task completed with {} pauses",
out.downcast_ref::<TimedTaskOutput>()
.expect("we know the task type")
.pauses_count
);
}
Ok(out)
}
fn with_priority(&self) -> bool {
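The downcast in the new `run` body works because task outputs are type-erased behind a trait object; `downcast_ref` recovers the concrete type, as with `std::any::Any`. A standalone illustration of the same pattern (the `u32` field type is an assumption):
use std::any::Any;
struct TimedTaskOutput {
    pauses_count: u32,
}
// Succeeds only if the erased value really is a `TimedTaskOutput`.
fn pauses_of(out: &dyn Any) -> Option<u32> {
    out.downcast_ref::<TimedTaskOutput>().map(|o| o.pauses_count)
}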

View file

@@ -154,7 +154,7 @@ export const ExplorerTagBar = () => {
// extract the list of tags from each object in the selected items
const targetsTagList = Array.from(explorer.selectedItems.entries()).map(
// Type issues here; unclear why, and not causing any noticeable errors, so ignoring for now with `as any`.
(item) => (item[0] as any).object.item.tags
(item) => (item[0] as any).item.object.tags
);
// iterate through each tag in the selected items and check if the tag we want to assign is already assigned

View file

@@ -69,7 +69,7 @@ export const Component = () => {
}
});
const watchBackgroundProcessingPercentage = form.watch('background_processing_percentage');
// const watchBackgroundProcessingPercentage = form.watch('background_processing_percentage');
useDebouncedFormWatch(form, async (value) => {
if (await form.trigger()) {
@@ -85,9 +85,9 @@ export const Component = () => {
image_labeler_version: value.image_labeler_version ?? null
});
if (value.background_processing_percentage != undefined) {
if (value.background_processing_percentage != null) {
await updateThumbnailerPreferences.mutateAsync({
background_processing_percentage: value.background_processing_percentage
// background_processing_percentage: value.background_processing_percentage
});
}
}

View file

@@ -169,7 +169,7 @@ export type CameraData = { device_make: string | null; device_model: string | nu
export type CasId = string
export type ChangeNodeNameArgs = { name: string | null; p2p_port: Port | null; p2p_disabled: boolean | null; p2p_ipv6_disabled: boolean | null; p2p_relay_disabled: boolean | null; p2p_discovery: P2PDiscoveryState | null; p2p_remote_access: boolean | null; p2p_manual_peers: string[] | null; image_labeler_version: string | null }
export type ChangeNodeNameArgs = { name: string | null; p2p_port: Port | null; p2p_disabled: boolean | null; p2p_ipv6_disabled: boolean | null; p2p_relay_disabled: boolean | null; p2p_discovery: P2PDiscoveryState | null; p2p_remote_access: boolean | null; p2p_manual_peers: string[] | null }
export type Chapter = { id: number; start: [number, number]; end: [number, number]; time_base_den: number; time_base_num: number; metadata: Metadata }
@@ -657,7 +657,7 @@ export type TextMatch = { contains: string } | { startsWith: string } | { endsWi
*/
export type ThumbKey = { shard_hex: string; cas_id: CasId; base_directory_str: string }
export type UpdateThumbnailerPreferences = { background_processing_percentage: number }
export type UpdateThumbnailerPreferences = Record<string, never>
export type VideoProps = { pixel_format: string | null; color_range: string | null; bits_per_channel: number | null; color_space: string | null; color_primaries: string | null; color_transfer: string | null; field_order: string | null; chroma_location: string | null; width: number; height: number; aspect_ratio_num: number | null; aspect_ratio_den: number | null; properties: string[] }

View file

@@ -1,2 +1,2 @@
[toolchain]
channel = "1.78"
channel = "1.79"