diff --git a/.clippy.toml b/.clippy.toml
new file mode 100644
index 000000000..154626ef4
--- /dev/null
+++ b/.clippy.toml
@@ -0,0 +1 @@
+allow-unwrap-in-tests = true
diff --git a/core/crates/sync/src/ingest.rs b/core/crates/sync/src/ingest.rs
index 5b5bfff42..4b37fc13a 100644
--- a/core/crates/sync/src/ingest.rs
+++ b/core/crates/sync/src/ingest.rs
@@ -118,10 +118,7 @@ impl Actor {
 		let mut timestamp = {
 			let mut clocks = self.timestamps.write().await;
 
-			clocks
-				.entry(op.instance)
-				.or_insert_with(|| op.timestamp)
-				.clone()
+			*clocks.entry(op.instance).or_insert_with(|| op.timestamp)
 		};
 
 		if timestamp < op.timestamp {
diff --git a/core/crates/sync/src/manager.rs b/core/crates/sync/src/manager.rs
index 42ba1b158..35d0dcb46 100644
--- a/core/crates/sync/src/manager.rs
+++ b/core/crates/sync/src/manager.rs
@@ -14,19 +14,19 @@ pub struct Manager {
 	shared: Arc<SharedState>,
 }
 
-pub struct SyncManagerNew {
-	pub manager: Manager,
-	pub rx: broadcast::Receiver<SyncMessage>,
-}
-
 #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)]
 pub struct GetOpsArgs {
 	pub clocks: Vec<(Uuid, NTP64)>,
 	pub count: u32,
 }
 
+pub struct New<T> {
+	pub manager: T,
+	pub rx: broadcast::Receiver<SyncMessage>,
+}
+
 impl Manager {
-	pub fn new(db: &Arc<PrismaClient>, instance: Uuid) -> SyncManagerNew {
+	pub fn new(db: &Arc<PrismaClient>, instance: Uuid) -> New<Self> {
 		let (tx, rx) = broadcast::channel(64);
 
 		let timestamps: Timestamps = Default::default();
@@ -41,7 +41,7 @@ impl Manager {
 		let ingest = ingest::Actor::spawn(shared.clone());
 
-		SyncManagerNew {
+		New {
 			manager: Self { shared, tx, ingest },
 			rx,
 		}
 	}
diff --git a/core/crates/sync/tests/lib.rs b/core/crates/sync/tests/lib.rs
index 942bc67d0..e683dfa80 100644
--- a/core/crates/sync/tests/lib.rs
+++ b/core/crates/sync/tests/lib.rs
@@ -112,15 +112,14 @@ async fn bruh() -> Result<(), Box<dyn std::error::Error>> {
 
 		async move {
 			while let Ok(msg) = sync_rx1.recv().await {
-				match msg {
-					SyncMessage::Created => instance2
+				if let SyncMessage::Created = msg {
+					instance2
 						.sync
 						.ingest
 						.event_tx
 						.send(ingest::Event::Notification)
 						.await
-						.unwrap(),
-					_ => {}
+						.unwrap()
 				}
 			}
 		}
diff --git a/core/src/api/jobs.rs b/core/src/api/jobs.rs
index a33d5b83b..b32066a18 100644
--- a/core/src/api/jobs.rs
+++ b/core/src/api/jobs.rs
@@ -48,8 +48,8 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
 					}
 				};
 
-				let instant = intervals.entry(progress_event.id).or_insert_with(||
-					Instant::now()
+				let instant = intervals.entry(progress_event.id).or_insert_with(
+					Instant::now
 				);
 
 				if instant.elapsed() <= Duration::from_secs_f64(1.0 / 30.0) {
diff --git a/core/src/api/locations.rs b/core/src/api/locations.rs
index 9bc6bae62..5c0164c2a 100644
--- a/core/src/api/locations.rs
+++ b/core/src/api/locations.rs
@@ -9,7 +9,7 @@ use crate::{
 	util::AbortOnDrop,
 };
 
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 
 use rspc::{self, alpha::AlphaRouter, ErrorCode};
 use serde::{Deserialize, Serialize};
diff --git a/core/src/api/utils/invalidate.rs b/core/src/api/utils/invalidate.rs
index 29308edd1..d62a369c4 100644
--- a/core/src/api/utils/invalidate.rs
+++ b/core/src/api/utils/invalidate.rs
@@ -114,20 +114,20 @@ impl InvalidRequests {
 /// );
 /// ```
 #[macro_export]
-#[allow(clippy::crate_in_macro_def)]
+// #[allow(clippy::crate_in_macro_def)]
 macro_rules! invalidate_query {
 	($ctx:expr, $key:literal) => {{
-		let ctx: &crate::library::Library = &$ctx; // Assert the context is the correct type
+		let ctx: &$crate::library::Library = &$ctx; // Assert the context is the correct type
 
 		#[cfg(debug_assertions)]
 		{
 			#[ctor::ctor]
 			fn invalidate() {
-				crate::api::utils::INVALIDATION_REQUESTS
+				$crate::api::utils::INVALIDATION_REQUESTS
 					.lock()
 					.unwrap()
 					.queries
-					.push(crate::api::utils::InvalidationRequest {
+					.push($crate::api::utils::InvalidationRequest {
 						key: $key,
 						arg_ty: None,
 						result_ty: None,
@@ -139,23 +139,23 @@ macro_rules! invalidate_query {
 		::tracing::trace!(target: "sd_core::invalidate-query", "invalidate_query!(\"{}\") at {}", $key, concat!(file!(), ":", line!()));
 
 		// The error are ignored here because they aren't mission critical. If they fail the UI might be outdated for a bit.
-		ctx.emit(crate::api::CoreEvent::InvalidateOperation(
-			crate::api::utils::InvalidateOperationEvent::dangerously_create($key, serde_json::Value::Null, None)
+		ctx.emit($crate::api::CoreEvent::InvalidateOperation(
+			$crate::api::utils::InvalidateOperationEvent::dangerously_create($key, serde_json::Value::Null, None)
 		))
 	}};
 	($ctx:expr, $key:literal: $arg_ty:ty, $arg:expr $(,)?) => {{
 		let _: $arg_ty = $arg; // Assert the type the user provided is correct
-		let ctx: &crate::library::Library = &$ctx; // Assert the context is the correct type
+		let ctx: &$crate::library::Library = &$ctx; // Assert the context is the correct type
 
 		#[cfg(debug_assertions)]
 		{
 			#[ctor::ctor]
 			fn invalidate() {
-				crate::api::utils::INVALIDATION_REQUESTS
+				$crate::api::utils::INVALIDATION_REQUESTS
 					.lock()
 					.unwrap()
 					.queries
-					.push(crate::api::utils::InvalidationRequest {
+					.push($crate::api::utils::InvalidationRequest {
 						key: $key,
 						arg_ty: Some(<$arg_ty as rspc::internal::specta::Type>::reference(rspc::internal::specta::DefOpts {
 							parent_inline: false,
@@ -172,8 +172,8 @@
 		// The error are ignored here because they aren't mission critical. If they fail the UI might be outdated for a bit.
 		let _ = serde_json::to_value($arg)
 			.map(|v|
-				ctx.emit(crate::api::CoreEvent::InvalidateOperation(
-					crate::api::utils::InvalidateOperationEvent::dangerously_create($key, v, None),
+				ctx.emit($crate::api::CoreEvent::InvalidateOperation(
+					$crate::api::utils::InvalidateOperationEvent::dangerously_create($key, v, None),
 				))
 			)
 			.map_err(|_| {
@@ -182,17 +182,17 @@
 	}};
 	($ctx:expr, $key:literal: $arg_ty:ty, $arg:expr, $result_ty:ty: $result:expr $(,)?) => {{
 		let _: $arg_ty = $arg; // Assert the type the user provided is correct
-		let ctx: &crate::library::Library = &$ctx; // Assert the context is the correct type
+		let ctx: &$crate::library::Library = &$ctx; // Assert the context is the correct type
 
 		#[cfg(debug_assertions)]
 		{
 			#[ctor::ctor]
 			fn invalidate() {
-				crate::api::utils::INVALIDATION_REQUESTS
+				$crate::api::utils::INVALIDATION_REQUESTS
 					.lock()
 					.unwrap()
 					.queries
-					.push(crate::api::utils::InvalidationRequest {
+					.push($crate::api::utils::InvalidationRequest {
 						key: $key,
 						arg_ty: Some(<$arg_ty as rspc::internal::specta::Type>::reference(rspc::internal::specta::DefOpts {
 							parent_inline: false,
@@ -214,8 +214,8 @@ macro_rules! invalidate_query {
 				.and_then(|arg|
 					serde_json::to_value($result)
 						.map(|result|
-							ctx.emit(crate::api::CoreEvent::InvalidateOperation(
-								crate::api::utils::InvalidateOperationEvent::dangerously_create($key, arg, Some(result)),
+							ctx.emit($crate::api::CoreEvent::InvalidateOperation(
+								$crate::api::utils::InvalidateOperationEvent::dangerously_create($key, arg, Some(result)),
 							))
 						)
 				)
diff --git a/core/src/library/manager/mod.rs b/core/src/library/manager/mod.rs
index 7ae1d545a..8bb26e2bd 100644
--- a/core/src/library/manager/mod.rs
+++ b/core/src/library/manager/mod.rs
@@ -387,7 +387,7 @@ impl Libraries {
 			identity,
 			// key_manager,
 			db,
-			&node,
+			node,
 			Arc::new(sync.manager),
 		)
 		.await;
diff --git a/core/src/object/fs/copy.rs b/core/src/object/fs/copy.rs
index ddba27661..1fd5aa6ef 100644
--- a/core/src/object/fs/copy.rs
+++ b/core/src/object/fs/copy.rs
@@ -137,26 +137,39 @@ impl StatefulJob for FileCopierJobInit {
 							.expect("We got the children path from the read_dir, so it should be a child of the source path"),
 					);
 
-				// Currently not supporting file_name suffixes children files in a directory being copied
-				more_steps.push(FileCopierJobStep {
-					target_full_path: target_children_full_path,
-					source_file_data: get_file_data_from_isolated_file_path(
-						&ctx.library.db,
+				match get_file_data_from_isolated_file_path(
+					&ctx.library.db,
+					&data.sources_location_path,
+					&IsolatedFilePathData::new(
+						init.source_location_id,
 						&data.sources_location_path,
-						&IsolatedFilePathData::new(
-							init.source_location_id,
-							&data.sources_location_path,
-							&children_path,
-							children_entry
-								.metadata()
-								.await
-								.map_err(|e| FileIOError::from((&children_path, e)))?
-								.is_dir(),
-						)
-						.map_err(FileSystemJobsError::from)?,
+						&children_path,
+						children_entry
+							.metadata()
+							.await
+							.map_err(|e| FileIOError::from((&children_path, e)))?
+							.is_dir(),
 					)
-					.await?,
-				});
+					.map_err(FileSystemJobsError::from)?,
+				)
+				.await
+				{
+					Ok(source_file_data) => {
+						// Currently not supporting file_name suffixes children files in a directory being copied
+						more_steps.push(FileCopierJobStep {
+							target_full_path: target_children_full_path,
+							source_file_data,
+						});
+					}
+					Err(FileSystemJobsError::FilePathNotFound(path)) => {
+						// FilePath doesn't exist in the database, it possibly wasn't indexed, so we skip it
+						warn!(
+							"Skipping duplicating {} as it wasn't indexed",
+							path.display()
+						);
+					}
+					Err(e) => return Err(e.into()),
+				}
 			}
 
 			Ok(more_steps.into())
diff --git a/core/src/object/fs/mod.rs b/core/src/object/fs/mod.rs
index 5b8ad93d3..1236e36a4 100644
--- a/core/src/object/fs/mod.rs
+++ b/core/src/object/fs/mod.rs
@@ -95,6 +95,7 @@ pub async fn get_file_data_from_isolated_file_path(
 	location_path: impl AsRef<Path>,
 	iso_file_path: &IsolatedFilePathData<'_>,
 ) -> Result<FileData, FileSystemJobsError> {
+	let location_path = location_path.as_ref();
 	db.file_path()
 		.find_unique(iso_file_path.into())
 		.include(file_path_with_object::include())
@@ -102,16 +103,12 @@
 		.await?
 		.ok_or_else(|| {
 			FileSystemJobsError::FilePathNotFound(
-				AsRef::<Path>::as_ref(iso_file_path)
-					.to_path_buf()
-					.into_boxed_path(),
+				location_path.join(iso_file_path).into_boxed_path(),
 			)
 		})
 		.and_then(|path_data| {
 			Ok(FileData {
-				full_path: location_path
-					.as_ref()
-					.join(IsolatedFilePathData::try_from(&path_data)?),
+				full_path: location_path.join(IsolatedFilePathData::try_from(&path_data)?),
 				file_path: path_data,
 			})
 		})
diff --git a/core/src/p2p/pairing/mod.rs b/core/src/p2p/pairing/mod.rs
index a33ec736a..f96668d85 100644
--- a/core/src/p2p/pairing/mod.rs
+++ b/core/src/p2p/pairing/mod.rs
@@ -129,8 +129,7 @@ impl PairingManager {
 			.get_all()
 			.await
 			.into_iter()
-			.find(|i| i.id == library_id)
-			.is_some()
+			.any(|i| i.id == library_id)
 		{
 			self.emit_progress(pairing_id, PairingStatus::LibraryAlreadyExists);
 
@@ -246,7 +245,7 @@ impl PairingManager {
 				.send(P2PEvent::PairingRequest {
 					id: pairing_id,
 					name: remote_instance.node_name.clone(),
-					os: remote_instance.node_platform.clone().into(),
+					os: remote_instance.node_platform.into(),
 				})
 				.ok();
 
diff --git a/core/src/p2p/pairing/proto.rs b/core/src/p2p/pairing/proto.rs
index 27313d86d..6f8c7baf0 100644
--- a/core/src/p2p/pairing/proto.rs
+++ b/core/src/p2p/pairing/proto.rs
@@ -222,8 +222,8 @@ mod tests {
 			node_id: Uuid::new_v4(),
 			node_name: "Node Name".into(),
 			node_platform: Platform::current(),
-			last_seen: Utc::now().into(),
-			date_created: Utc::now().into(),
+			last_seen: Utc::now(),
+			date_created: Utc::now(),
 		};
 
 		{
diff --git a/core/src/p2p/sync/mod.rs b/core/src/p2p/sync/mod.rs
index 14a8b5437..e8c630522 100644
--- a/core/src/p2p/sync/mod.rs
+++ b/core/src/p2p/sync/mod.rs
@@ -263,7 +263,7 @@ mod originator {
 		dbg!(&library.instances);
 
 		// TODO: Deduplicate any duplicate peer ids -> This is an edge case but still
-		for (_, instance) in &library.instances {
+		for instance in library.instances.values() {
 			let InstanceState::Connected(peer_id) = *instance else {
 				continue;
 			};
@@ -276,12 +276,7 @@
 				"Alerting peer '{peer_id:?}' of new sync events for library '{library_id:?}'"
 			);
 
-			let mut stream = p2p
-				.manager
-				.stream(peer_id.clone())
-				.await
-				.map_err(|_| ())
-				.unwrap(); // TODO: handle providing incorrect peer id
+			let mut stream = p2p.manager.stream(peer_id).await.map_err(|_| ()).unwrap(); // TODO: handle providing incorrect peer id
 
 			stream
 				.write_all(&Header::Sync(library_id).to_bytes())
diff --git a/core/src/p2p/sync/proto.rs b/core/src/p2p/sync/proto.rs
index 0c4295cc9..bcbe12bb7 100644
--- a/core/src/p2p/sync/proto.rs
+++ b/core/src/p2p/sync/proto.rs
@@ -27,10 +27,10 @@ impl SyncMessage {
 
 #[cfg(test)]
 mod tests {
-	use sd_core_sync::NTP64;
-	use sd_sync::SharedOperation;
-	use serde_json::Value;
-	use uuid::Uuid;
+	// use sd_core_sync::NTP64;
+	// use sd_sync::SharedOperation;
+	// use serde_json::Value;
+	// use uuid::Uuid;
 
 	use super::*;
 
diff --git a/core/src/preferences/kv.rs b/core/src/preferences/kv.rs
index 8ca78875d..b42e1c8e8 100644
--- a/core/src/preferences/kv.rs
+++ b/core/src/preferences/kv.rs
@@ -42,7 +42,8 @@ impl PreferenceValue {
 	pub fn new(value: impl Serialize) -> Self {
 		let mut bytes = vec![];
 
-		rmp_serde::encode::write_named(&mut bytes, &value).unwrap();
+		rmp_serde::encode::write_named(&mut bytes, &value)
+			.expect("Failed to serialize preference value");
 
 		// let value = rmpv::decode::read_value(&mut bytes.as_slice()).unwrap();
 
@@ -52,7 +53,8 @@
 	pub fn from_value(value: Value) -> Self {
 		let mut bytes = vec![];
 
-		rmpv::encode::write_value(&mut bytes, &value).unwrap();
+		rmpv::encode::write_value(&mut bytes, &value)
+			.expect("Failed to serialize preference value");
 
 		Self(bytes)
 	}
@@ -76,6 +78,7 @@ pub enum Entry {
 	Nested(Entries),
 }
 
+#[allow(clippy::unwrap_used, clippy::panic)]
 impl Entry {
 	pub fn expect_value<T: DeserializeOwned>(self) -> T {
 		match self {
diff --git a/core/src/preferences/mod.rs b/core/src/preferences/mod.rs
index 7dabf1d61..c036af351 100644
--- a/core/src/preferences/mod.rs
+++ b/core/src/preferences/mod.rs
@@ -1,14 +1,15 @@
-mod kv;
-
-pub use kv::*;
-use specta::Type;
-
 use crate::prisma::PrismaClient;
+
 use std::collections::HashMap;
 
 use serde::{Deserialize, Serialize};
+use specta::Type;
+use tracing::error;
 use uuid::Uuid;
 
+mod kv;
+pub use kv::*;
+
 // Preferences are a set of types that are serialized as a list of key-value pairs,
 // where nested type keys are serialized as a dot-separated path.
 // They are serailized as a list because this allows preferences to be a synchronisation boundary,
@@ -36,9 +37,10 @@ impl LibraryPreferences {
 		let prefs = PreferenceKVs::new(
 			kvs.into_iter()
 				.filter_map(|data| {
-					let a = rmpv::decode::read_value(&mut data.value?.as_slice()).unwrap();
-
-					Some((PreferenceKey::new(data.key), PreferenceValue::from_value(a)))
+					rmpv::decode::read_value(&mut data.value?.as_slice())
+						.map_err(|e| error!("{e:#?}"))
+						.ok()
+						.map(|value| {
+							(
+								PreferenceKey::new(data.key),
+								PreferenceValue::from_value(value),
+							)
+						})
 				})
 				.collect(),
 		);
@@ -101,9 +108,10 @@ where
 	entries
 		.into_iter()
 		.map(|(key, value)| {
-			let id = Uuid::parse_str(&key).unwrap();
-
-			(id, V::from_entries(value.expect_nested()))
+			(
+				Uuid::parse_str(&key).expect("invalid uuid in preferences"),
+				V::from_entries(value.expect_nested()),
+			)
 		})
 		.collect()
 }
diff --git a/core/src/util/mpscrr.rs b/core/src/util/mpscrr.rs
index d7ba7a858..c3a108467 100644
--- a/core/src/util/mpscrr.rs
+++ b/core/src/util/mpscrr.rs
@@ -276,7 +276,7 @@ mod tests {
 				.await
 				.unwrap();
 
-			assert!(true, "recv a closed");
+			// assert!(true, "recv a closed");
 		}
 	});
 
@@ -297,7 +297,7 @@
 				.await
 				.unwrap();
 
-			assert!(true, "recv b closed");
+			// assert!(true, "recv b closed");
 		}
 	});