Mirror of https://github.com/spacedriveapp/spacedrive
Synced 2024-07-04 13:23:28 +00:00
Squashed commit of the following:

commit 36ae94c998874b5aaf79be0b87d1c05c605b1ff0
Merge: df0201729 9126332df
Author: Aditya <raghavbhai4545@gmail.com>
Date: Sat Apr 27 21:35:22 2024 +0530
    Merge branch 'spacedriveapp:main' into main

commit 9126332df1
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Sat Apr 27 18:08:07 2024 +0300
    [MOB-89] Separate headers (#2408)
    * separate headers
      improvements to headers
      cleanup
      missed cleanup
      documentation
    * Update SearchStack.tsx

commit a61a7bee65
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Fri Apr 26 20:36:21 2024 +0300
    Windows mouse resize fix (#2407)
    Update useMouseItemResize.ts

commit 9384bade61
Author: Vítor Vasconcellos <vasconcellos.dev@gmail.com>
Date: Thu Apr 25 21:29:55 2024 -0300
    Revert OpenDAL for ephemeral location (#2399)
    * Revert "OpenDAL - Ephemeral Locations (#2283)"
      This reverts commit 2848782e8e.
    * Format
    * Fix some diff problems

commit e76ff78f3c
Author: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com>
Date: Thu Apr 25 20:29:55 2024 -0400
    Alpha 0.2.13 (#2394)
    bump

commit 476447ab70
Author: Vítor Vasconcellos <vasconcellos.dev@gmail.com>
Date: Thu Apr 25 21:20:36 2024 -0300
    Fix server release again (#2403)
    * Fix server release again
    * small improvement to regex

commit ab46cffa11
Author: Jamie Pine <32987599+jamiepine@users.noreply.github.com>
Date: Thu Apr 25 14:37:25 2024 -0700
    Reactive file identification (#2396)
    * yes
    * Explain mysterious if
    * Use id alias just for consistency reasons
    * yes
    * Rust fmt
    * fix ts
    ---------
    Co-authored-by: Ericson "Fogo" Soares <ericson.ds999@gmail.com>
    Co-authored-by: Utku Bakir <74243531+utkubakir@users.noreply.github.com>

commit 64bbce32e9
Author: Utku <74243531+utkubakir@users.noreply.github.com>
Date: Thu Apr 25 16:06:35 2024 -0400
    Fix title (#2398)
    * fix task manager title
    * 2 more config item

commit 310eb28e63
Author: Vítor Vasconcellos <vasconcellos.dev@gmail.com>
Date: Thu Apr 25 14:58:50 2024 -0300
    Fix `cargo test` & improve `pnpm prep` native deps download (#2393)
    Couple of fixes:
    - Increase `pnpm prep` connection timeout to 5min, to better handle downloading native deps under flaky network conditions
    - Fix `cargo test` and cache-factory CI
    - Clippy and fmt

commit b86d3d27cb
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Thu Apr 25 20:29:46 2024 +0300
    [ENG-1762] Reverse mouse resize direction (#2395)
    Update useMouseItemResize.ts

commit 449337285d
Author: Artsiom Voitas <artsiom.voitas@gmail.com>
Date: Thu Apr 25 19:26:36 2024 +0300
    Improved translation into Belarusian and Russian (#2391)
    * feat: improved translation on belarusian and russian
    * updated keys related to vacuum
    * updated keys related to vacuum

commit b1ffbee9b1
Author: Jamie Pine <32987599+jamiepine@users.noreply.github.com>
Date: Thu Apr 25 09:14:43 2024 -0700
    Fix thumbnail generation reactivity (#2392)
    fix

commit 73f521a3b8
Author: Ericson "Fogo" Soares <ericson.ds999@gmail.com>
Date: Thu Apr 25 01:06:11 2024 -0300
    [ENG-1629] Write new file identifier with the task system (#2334)
    * Introduce deep vs shallow for indexer tasks with different priorities
    * Make job wait to dispatch if it's paused
    * Extract file metadata task on file identifier job
    * Some initial drafts on object processor task
    * Object processor task for file identifier
    * File Identifier job and shallow

commit 463babe1d4
Author: Heavysnowjakarta <54460050+HeavySnowJakarta@users.noreply.github.com>
Date: Thu Apr 25 07:38:34 2024 +0800
    I18n polish (zh-cn) (#2337)
    * i18n some polishes
    * reviewed 1st-100th strings of zh-cn i18n
    * change the indent to 2 space characters

commit 2c777e53f1
Author: Vítor Vasconcellos <vasconcellos.dev@gmail.com>
Date: Wed Apr 24 20:37:38 2024 -0300
    Fix core test (#2386)
    * Fix core test
    * Import CompressedCRDTOperations
    ---------
    Co-authored-by: Ericson "Fogo" Soares <ericson.ds999@gmail.com>

commit 57b0139240
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Thu Apr 25 02:34:24 2024 +0300
    [MOB-90] Visual adjustments (#2383)
    * Visual adjustments
    * Update Tags.tsx
    * cleanup
    * remove prop
    * remove hitslop
    * sectionlist

commit e0f540a1be
Author: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com>
Date: Wed Apr 24 14:20:51 2024 -0400
    Small Trash UI fixes (#2385)
    * Update index.tsx
    * More ui fixes + toast
    * Update index.tsx
    * Add Translations

commit 279aaf2c50
Author: Utku <74243531+utkubakir@users.noreply.github.com>
Date: Wed Apr 24 12:48:14 2024 -0400
    hide placeholders (#2384)

commit 3bed56d4d9
Author: Utku <74243531+utkubakir@users.noreply.github.com>
Date: Wed Apr 24 10:25:22 2024 -0400
    Alpha 0.2.12 (#2382)
    * pnpm
    * alpha 0.2.12
    * make pnpm version non strict

commit 0b6bd050a0
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Wed Apr 24 18:09:18 2024 +0800
    Fix main (#2381)
    * fix
    * fix

commit ae6c49b0ba
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Wed Apr 24 16:43:30 2024 +0800
    Improved p2p settings (#2379)
    improved p2p settings

commit 918c2a987d
Author: Brendan Allan <brendonovich@outlook.com>
Date: Wed Apr 24 16:26:50 2024 +0800
    Batch ingest sync operations (#2378)
    batch ingest sync operations

commit 643bd3a142
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Wed Apr 24 16:27:31 2024 +0800
    Block size (#2377)
    Block size + some Clippy

commit e009a0478c
Author: Utku <74243531+utkubakir@users.noreply.github.com>
Date: Tue Apr 23 19:20:59 2024 -0400
    Revert "[MOB-85] Better headers" (#2376)
    Revert "[MOB-85] Better headers (#2375)"
    This reverts commit 6a556a457d.

commit 6a556a457d
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Wed Apr 24 01:21:31 2024 +0300
    [MOB-85] Better headers (#2375)
    * wip
    * improve headers
    * cleanup

commit b4037d6537
Author: Arnab Chakraborty <11457760+Rocky43007@users.noreply.github.com>
Date: Mon Apr 22 15:46:10 2024 -0400
    Open Trash from the application (#2338)
    * Open Trash from the application
    * Working Trash Sidebar Button
    * Small UI fixes
    * Update common.json
    * Move openTrash to Tauri Command instead of RSPC
    * format and remove type assertion
    ---------
    Co-authored-by: Utku Bakir <74243531+utkubakir@users.noreply.github.com>

commit 745399ecab
Author: nikec <43032218+niikeec@users.noreply.github.com>
Date: Mon Apr 22 20:54:42 2024 +0200
    [ENG-1751] Improve active item handling (#2367)
    base

commit 959ccdfd98
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Mon Apr 22 20:43:44 2024 +0800
    Reintroduce P2P Settings (#2365)
    * redo backend to be less cringe
    * fixed up

commit ef969f1ada
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Mon Apr 22 19:47:47 2024 +0800
    Remove indexer rules from ephemeral indexer (#2319)
    remove indexer rules from ephemeral indexer

commit 548fff1e96
Author: Brendan Allan <brendonovich@outlook.com>
Date: Mon Apr 22 18:29:54 2024 +0800
    Ignore empty object/filepath search filters (#2371)

commit 52c5c2bfe7
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Mon Apr 22 18:28:35 2024 +0800
    Show errors creating P2P listeners on startup (#2372)
    * do it
    * fix accuracy
    * `useRef` as god intended

commit 20e5430eaf
Author: nikec <43032218+niikeec@users.noreply.github.com>
Date: Mon Apr 22 12:27:30 2024 +0200
    [ENG-1753] Only open quick preview when items are selected (#2374)
    only toggle when items are selected

commit 13e4ff6107
Author: nikec <43032218+niikeec@users.noreply.github.com>
Date: Mon Apr 22 12:25:53 2024 +0200
    [ENG-1752] Fix explorer selection reset when closing quick preview via keybind (#2373)
    prevent selection reset

commit 51c94c88e3
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Mon Apr 22 18:12:06 2024 +0800
    Fix Docker start command (#2370)

commit d689e7e58a
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Sun Apr 21 17:28:27 2024 +0300
    [ENG-1750] Update context menu colors (#2369)
    update context menu colors

commit df0201729278dfa11126b41922e404146c151a35
Merge: 5624054f1 619a4c8b6
Author: Aditya <raghavbhai4545@gmail.com>
Date: Sat Apr 20 13:14:52 2024 +0530
    Merge branch 'main' of https://github.com/Raghav-45/spacedrive

commit 947354f6c0
Author: Oscar Beaumont <oscar@otbeaumont.me>
Date: Sat Apr 20 11:21:20 2024 +0800
    Remove files over p2p feature (#2364)
    * goodbye
    * types
    * a

commit f97a761346
Author: ameer2468 <33054370+ameer2468@users.noreply.github.com>
Date: Sat Apr 20 02:18:54 2024 +0300
    [ENG-1745] Mouse wheel resize (#2366)
    * Resize layout items with mouse wheel
      icon/item size using mouse wheel
      Update useMouseItemResize.ts
      Update useMouseItemResize.ts
    * improve comment
    * fb
    * Update useMouseItemResize.ts
    * Update IconSize.tsx

commit 619a4c8b6dfe7239bce1b54b528a3176aa7350da
Merge: df4f6279b 795bb18d1
Author: Aditya <raghavbhai4545@gmail.com>
Date: Tue Aug 29 16:59:18 2023 +0530
    Merge branch 'spacedriveapp:main' into main

commit df4f6279bfbd7bdaa120212b19db72cfae0d17c7
Merge: dfb519206 e4b03619d
Author: Aditya <raghavbhai4545@gmail.com>
Date: Tue Aug 22 20:44:09 2023 +0530
    Merge branch 'spacedriveapp:main' into main

commit dfb51920667c24ff05b16ebc63bf4aea33225002
Merge: c1bfc3296 a0a1c6766
Author: Aditya <raghavbhai4545@gmail.com>
Date: Thu Aug 17 21:22:49 2023 +0530
    Merge branch 'spacedriveapp:main' into main

commit c1bfc3296ee7686a6a142d74a91cf13cf4bd7677
Merge: de274c331 9c0aec816
Author: Aditya <raghavbhai4545@gmail.com>
Date: Tue Aug 15 19:43:43 2023 +0530
    Merge branch 'spacedriveapp:main' into main

commit de274c3317cff942e9c3a4f2c8c08819a897d251
Merge: 14faf0bce c86a728a1
Author: Aditya <raghavbhai4545@gmail.com>
Date: Sun Aug 13 21:54:16 2023 +0530
    Merge branch 'spacedriveapp:main' into main

commit 14faf0bce2ee9123bf66706812357d6aefc44dea
Merge: 3e013d8bd baf032883
Author: Aditya <raghavbhai4545@gmail.com>
Date: Thu Aug 10 06:54:01 2023 -0400
    Merge branch 'spacedriveapp:main' into main

commit 3e013d8bdef2ba59536c90044be4312336b6cd8a
Merge: 2e702f2eb 7708ba585
Author: Aditya <raghavbhai4545@gmail.com>
Date: Tue Aug 8 11:21:07 2023 -0400
    Merge branch 'spacedriveapp:main' into main

commit 2e702f2eba
Author: Brendan Allan <brendonovich@outlook.com>
Date: Tue Aug 8 07:58:58 2023 -0700
    Mention pnpm dev:web in CONTRIBUTING.md

commit a1c5c55a37
Author: Raghav-45 <77260113+Raghav-45@users.noreply.github.com>
Date: Tue Aug 8 18:49:50 2023 +0530
    Update command to run server
    I encountered an issue where the `cargo run -p server` command was not functioning properly. It took me nearly an hour to pinpoint the problem, which turned out to be a modification to the Cargo.toml file made by @Brendonovich during their work on issue #1181, which pertained to *syncing ingestion*. Initially I believed that re-cloning the repository from GitHub would resolve the issue; after attempting that exactly 5 times, I realized my assumption was incorrect. In the end, I was able to identify and rectify the problem.
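When `cargo run -p <package>` suddenly stops resolving after a Cargo.toml change like the one described above, the fastest sanity check is to ask Cargo for the current workspace member names instead of re-cloning. A minimal sketch, assuming `jq` is installed:

    # List the package names the workspace actually exposes to `-p`
    cargo metadata --format-version 1 --no-deps | jq -r '.packages[].name'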
This commit is contained in:
parent: ca40c8e23a
commit: cd2435edaf
.github/actions/setup-pnpm/action.yml (2 changed lines, vendored)

@@ -11,7 +11,7 @@ runs:
     - name: Install pnpm
       uses: pnpm/action-setup@v3
       with:
-        version: 9.0.2
+        version: 9.0.6

     - name: Install Node.js
       uses: actions/setup-node@v4
.github/workflows/server.yml (10 changed lines, vendored)

@@ -48,8 +48,14 @@ jobs:
         working-directory: /tmp
         run: |
           set -euxo pipefail
-          curl -SsJLO https://passt.top/builds/latest/x86_64/passt_954589b-1_all.deb
-          sudo dpkg -i passt_954589b-1_all.deb
+          deb="$(
+            curl -SsL https://passt.top/builds/latest/x86_64 \
+              | grep -oP 'passt[^\.<>'\''"]+\.deb' | sort -u | head -n1
+          )"
+
+          curl -SsJLO "https://passt.top/builds/latest/x86_64/${deb}"
+          sudo dpkg -i "${deb}"

       - name: Determine image name & tag
         id: image_info
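The replaced step pinned a snapshot-specific filename (passt_954589b-1_all.deb) that breaks whenever passt.top rotates its latest build; the new step discovers the current name from the directory listing first. A sketch for previewing what that grep resolves to, under the assumption that the listing still embeds plain passt_*.deb filenames:

    # Print the .deb name the workflow would download, without installing anything
    curl -SsL https://passt.top/builds/latest/x86_64 \
      | grep -oP 'passt[^\.<>'\''"]+\.deb' | sort -u | head -n1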
.vscode/i18n-ally-reviews.yml (115 added lines, new file, vendored)

@@ -0,0 +1,115 @@
+# Review comments generated by i18n-ally. Please commit this file.
+
+reviews:
+  about_vision_text:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: OS2GadFYJi0w8WbQ1KpUe
+            type: approve
+            comment: 疑似翻译腔。这个地方不太好译。
+            time: '2024-04-16T02:03:55.931Z'
+  all_jobs_have_been_cleared:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: hwThsx7VP-THpRXov2MB6
+            type: comment
+            comment: 要不要把“清除”改为“完成”?
+            time: '2024-04-16T10:56:22.929Z'
+  archive_info:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: pW79_SMSNiOyRj94kdSZO
+            type: comment
+            comment: 不太通顺。“位置”是否要加定语修饰?
+            time: '2024-04-16T11:03:10.218Z'
+  changelog_page_description:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: JN3YruMypxX5wuaMjD8Hu
+            type: comment
+            comment: 口语化显得更自然些。
+            time: '2024-04-16T11:05:27.478Z'
+  clouds:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: ebAW-cnfA4llVgee6CRmF
+            type: comment
+            comment: 一个字太少。
+            time: '2024-04-16T11:06:06.594Z'
+  coordinates:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: HJLIcCmrHV1ZwCsAJOSiS
+            type: comment
+            comment: 有可能应该改成“地理坐标”。
+            time: '2024-04-16T11:07:21.331Z'
+  create_library_description:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: N01f9vhjfYidHDnkhVV4o
+            type: comment
+            comment: >-
+              “libraries are
+              databases”这一句并不容易翻译,这里把英文原文放上去的方式我觉得并不妥当,但是我想不到更好的译法了。定语往后放到谓语的位置。同时添加必要的助词。
+            time: '2024-04-16T11:13:48.568Z'
+  create_new_library_description:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: Wb89DhKwsCB9vGBDUIgsj
+            type: comment
+            comment: 见“create_library_description”。
+            time: '2024-04-16T11:14:21.837Z'
+  creating_your_library:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: 6q9xmFoeVizgSTBbBey9O
+            type: comment
+            comment: “您的库”是典型的翻译腔。
+            time: '2024-04-16T11:15:52.949Z'
+  delete_warning:
+    locales:
+      zh-CN:
+        comments:
+          - user:
+              name: Heavysnowjakarta
+              email: heavysnowjakarta@gmail.com
+            id: 5oa5lvp8PkJDRceIenfne
+            type: comment
+            comment: 我不确定 `{{type}}` 是中文还是英文。如果是英文,前面应该加空格。
+            time: '2024-04-16T11:24:52.250Z'
CONTRIBUTING.md

@@ -91,7 +91,7 @@ If you encounter any issues, ensure that you are using the following versions of

 - Rust version: **1.75**
 - Node version: **18.18**
-- Pnpm version: **9.0.2**
+- Pnpm version: **9.0.6**

 After cleaning out your build artifacts using `pnpm clean`, `git clean`, or `cargo clean`, it is necessary to re-run the `setup-system` script.
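Before assuming a build failure is a bug, it is worth confirming the local toolchain matches these pins; a minimal check:

    rustc --version   # expect 1.75.x
    node --version    # expect v18.18.x
    pnpm --version    # expect 9.0.6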
Cargo.lock (406 changed lines, generated)

@@ -961,7 +961,7 @@ dependencies = [
  "serde_path_to_error",
  "serde_urlencoded",
  "sha1",
- "sync_wrapper 0.1.2",
+ "sync_wrapper",
  "tokio",
  "tokio-tungstenite",
  "tower",
@@ -986,18 +986,6 @@ dependencies = [
  "tower-service",
 ]

-[[package]]
-name = "backon"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c491fa80d69c03084223a4e73c378dd9f9a1e612eb54051213f88b2d5249b458"
-dependencies = [
- "fastrand 2.0.1",
- "futures-core",
- "pin-project",
- "tokio",
-]
-
 [[package]]
 name = "backtrace"
 version = "0.3.69"
@@ -1284,7 +1272,7 @@ version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e"
 dependencies = [
- "block-padding 0.2.1",
+ "block-padding",
  "cipher 0.3.0",
 ]
@@ -1294,15 +1282,6 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae"

-[[package]]
-name = "block-padding"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93"
-dependencies = [
- "generic-array 0.14.7",
-]
-
 [[package]]
 name = "block-sys"
 version = "0.2.1"
@@ -1531,15 +1510,6 @@ dependencies = [
  "rustversion",
 ]

-[[package]]
-name = "cbc"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6"
-dependencies = [
- "cipher 0.4.4",
-]
-
 [[package]]
 name = "cc"
 version = "1.0.83"
@@ -1873,26 +1843,6 @@ version = "0.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"

-[[package]]
-name = "const-random"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359"
-dependencies = [
- "const-random-macro",
-]
-
-[[package]]
-name = "const-random-macro"
-version = "0.1.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
-dependencies = [
- "getrandom 0.2.12",
- "once_cell",
- "tiny-keccak",
-]
-
 [[package]]
 name = "constant_time_eq"
 version = "0.3.0"
@@ -2370,7 +2320,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c"
 dependencies = [
  "const-oid",
- "pem-rfc7468",
  "zeroize",
 ]
@@ -2448,7 +2397,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
 dependencies = [
  "block-buffer 0.10.4",
- "const-oid",
  "crypto-common 0.1.6",
  "subtle",
 ]
@@ -2543,15 +2491,6 @@ dependencies = [
  "syn 2.0.48",
 ]

-[[package]]
-name = "dlv-list"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f"
-dependencies = [
- "const-random",
-]
-
 [[package]]
 name = "dmmf"
 version = "0.1.0"
@@ -2935,12 +2874,6 @@ version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33"

-[[package]]
-name = "flagset"
-version = "0.4.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdeb3aa5e95cf9aabc17f060cfa0ced7b83f042390760ca53bf09df9968acaa1"
-
 [[package]]
 name = "flate2"
 version = "1.0.28"
@@ -4185,7 +4118,6 @@ version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5"
 dependencies = [
- "block-padding 0.3.3",
  "generic-array 0.14.7",
 ]
@@ -4422,21 +4354,6 @@ dependencies = [
  "serde_json",
 ]

-[[package]]
-name = "jsonwebtoken"
-version = "9.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f"
-dependencies = [
- "base64 0.21.7",
- "js-sys",
- "pem",
- "ring 0.17.7",
- "serde",
- "serde_json",
- "simple_asn1",
-]
-
 [[package]]
 name = "kamadak-exif"
 version = "0.5.5"
@@ -4512,9 +4429,6 @@ name = "lazy_static"
 version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-dependencies = [
- "spin 0.5.2",
-]

 [[package]]
 name = "lazycell"
@@ -5394,7 +5308,7 @@ dependencies = [
  "metrics 0.19.0",
  "metrics-util 0.13.0",
  "parking_lot 0.11.2",
- "quanta 0.9.3",
+ "quanta",
  "thiserror",
  "tokio",
  "tracing",
@@ -5427,7 +5341,7 @@ dependencies = [
  "num_cpus",
  "ordered-float",
  "parking_lot 0.11.2",
- "quanta 0.9.3",
+ "quanta",
  "radix_trie",
  "sketches-ddsketch",
 ]
@@ -5445,7 +5359,7 @@ dependencies = [
  "metrics 0.19.0",
  "num_cpus",
  "parking_lot 0.11.2",
- "quanta 0.9.3",
+ "quanta",
  "sketches-ddsketch",
 ]

@@ -5533,30 +5447,6 @@ dependencies = [
  "tracing-subscriber",
 ]

-[[package]]
-name = "moka"
-version = "0.12.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1911e88d5831f748a4097a43862d129e3c6fca831eecac9b8db6d01d93c9de2"
-dependencies = [
- "async-lock 2.8.0",
- "async-trait",
- "crossbeam-channel",
- "crossbeam-epoch",
- "crossbeam-utils",
- "futures-util",
- "once_cell",
- "parking_lot 0.12.1",
- "quanta 0.12.3",
- "rustc_version",
- "skeptic",
- "smallvec 1.13.1",
- "tagptr",
- "thiserror",
- "triomphe",
- "uuid",
-]
-
 [[package]]
 name = "multiaddr"
 version = "0.18.1"
@@ -5917,23 +5807,6 @@ dependencies = [
  "num-traits",
 ]

-[[package]]
-name = "num-bigint-dig"
-version = "0.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
-dependencies = [
- "byteorder",
- "lazy_static",
- "libm",
- "num-integer",
- "num-iter",
- "num-traits",
- "rand 0.8.5",
- "smallvec 1.13.1",
- "zeroize",
-]
-
 [[package]]
 name = "num-complex"
 version = "0.4.4"
@@ -6124,37 +5997,6 @@ dependencies = [
  "windows-sys 0.42.0",
 ]

-[[package]]
-name = "opendal"
-version = "0.45.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52c17c077f23fa2d2c25d9d22af98baa43b8bbe2ef0de80cf66339aa70401467"
-dependencies = [
- "anyhow",
- "async-trait",
- "backon",
- "base64 0.21.7",
- "bytes",
- "chrono",
- "flagset",
- "futures",
- "getrandom 0.2.12",
- "http",
- "log",
- "md-5",
- "moka",
- "once_cell",
- "percent-encoding",
- "quick-xml",
- "reqsign",
- "reqwest",
- "serde",
- "serde_json",
- "sha2 0.10.8",
- "tokio",
- "uuid",
-]
-
 [[package]]
 name = "opener"
 version = "0.6.1"
@@ -6259,16 +6101,6 @@ dependencies = [
  "num-traits",
 ]

-[[package]]
-name = "ordered-multimap"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49203cdcae0030493bad186b28da2fa25645fa276a51b6fec8010d281e02ef79"
-dependencies = [
- "dlv-list",
- "hashbrown 0.14.3",
-]
-
 [[package]]
 name = "ordered-stream"
 version = "0.2.0"
@@ -6501,16 +6333,6 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"

-[[package]]
-name = "pbkdf2"
-version = "0.12.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
-dependencies = [
- "digest 0.10.7",
- "hmac",
-]
-
 [[package]]
 name = "pdfium-render"
 version = "0.8.16"
@@ -6554,15 +6376,6 @@ dependencies = [
  "serde",
 ]

-[[package]]
-name = "pem-rfc7468"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412"
-dependencies = [
- "base64ct",
-]
-
 [[package]]
 name = "percent-encoding"
 version = "2.3.1"
@@ -6829,32 +6642,6 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ad78bf43dcf80e8f950c92b84f938a0fc7590b7f6866fbcbeca781609c115590"

-[[package]]
-name = "pkcs1"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
-dependencies = [
- "der 0.7.8",
- "pkcs8 0.10.2",
- "spki 0.7.3",
-]
-
-[[package]]
-name = "pkcs5"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6"
-dependencies = [
- "aes 0.8.3",
- "cbc",
- "der 0.7.8",
- "pbkdf2",
- "scrypt",
- "sha2 0.10.8",
- "spki 0.7.3",
-]
-
 [[package]]
 name = "pkcs8"
 version = "0.9.0"
@@ -6872,8 +6659,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7"
 dependencies = [
  "der 0.7.8",
- "pkcs5",
- "rand_core 0.6.4",
  "spki 0.7.3",
 ]

@@ -7330,27 +7115,12 @@ dependencies = [
  "libc",
  "mach",
  "once_cell",
- "raw-cpuid 10.7.0",
+ "raw-cpuid",
  "wasi 0.10.2+wasi-snapshot-preview1",
  "web-sys",
  "winapi",
 ]

-[[package]]
-name = "quanta"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5"
-dependencies = [
- "crossbeam-utils",
- "libc",
- "once_cell",
- "raw-cpuid 11.0.1",
- "wasi 0.11.0+wasi-snapshot-preview1",
- "web-sys",
- "winapi",
-]
-
 [[package]]
 name = "query-connector"
 version = "0.1.0"
@@ -7458,7 +7228,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33"
 dependencies = [
  "memchr",
- "serde",
 ]

 [[package]]
@@ -7684,15 +7453,6 @@ dependencies = [
  "bitflags 1.3.2",
 ]

-[[package]]
-name = "raw-cpuid"
-version = "11.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1"
-dependencies = [
- "bitflags 2.4.1",
-]
-
 [[package]]
 name = "raw-window-handle"
 version = "0.5.2"
@@ -7837,37 +7597,6 @@ dependencies = [
  "winapi",
 ]

-[[package]]
-name = "reqsign"
-version = "0.14.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43e319d9de9ff4d941abf4ac718897118b0fe04577ea3f8e0f5788971784eef5"
-dependencies = [
- "anyhow",
- "async-trait",
- "base64 0.21.7",
- "chrono",
- "form_urlencoded",
- "getrandom 0.2.12",
- "hex",
- "hmac",
- "home",
- "http",
- "jsonwebtoken",
- "log",
- "once_cell",
- "percent-encoding",
- "quick-xml",
- "rand 0.8.5",
- "reqwest",
- "rsa",
- "rust-ini",
- "serde",
- "serde_json",
- "sha1",
- "sha2 0.10.8",
-]
-
 [[package]]
 name = "request-handlers"
 version = "0.1.0"
@@ -7907,7 +7636,6 @@ dependencies = [
  "http",
  "http-body",
  "hyper",
- "hyper-rustls",
  "hyper-tls",
  "ipnet",
  "js-sys",
@@ -7917,16 +7645,12 @@ dependencies = [
  "once_cell",
  "percent-encoding",
  "pin-project-lite",
- "rustls",
- "rustls-native-certs",
- "rustls-pemfile",
  "serde",
  "serde_json",
  "serde_urlencoded",
  "system-configuration",
  "tokio",
  "tokio-native-tls",
- "tokio-rustls",
  "tokio-util",
  "tower-service",
  "url",
@@ -8086,27 +7810,6 @@ version = "0.19.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3cd14fd5e3b777a7422cca79358c57a8f6e3a703d9ac187448d0daf220c2407f"

-[[package]]
-name = "rsa"
-version = "0.9.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc"
-dependencies = [
- "const-oid",
- "digest 0.10.7",
- "num-bigint-dig",
- "num-integer",
- "num-traits",
- "pkcs1",
- "pkcs8 0.10.2",
- "rand_core 0.6.4",
- "sha2 0.10.8",
- "signature 2.2.0",
- "spki 0.7.3",
- "subtle",
- "zeroize",
-]
-
 [[package]]
 name = "rspc"
 version = "0.1.4"
@@ -8158,16 +7861,6 @@ dependencies = [
  "smallvec 1.13.1",
 ]

-[[package]]
-name = "rust-ini"
-version = "0.20.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a"
-dependencies = [
- "cfg-if",
- "ordered-multimap",
-]
-
 [[package]]
 name = "rustc-demangle"
 version = "0.1.23"
@@ -8312,15 +8005,6 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072"

-[[package]]
-name = "salsa20"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213"
-dependencies = [
- "cipher 0.4.4",
-]
-
 [[package]]
 name = "same-file"
 version = "1.0.6"
@@ -8412,17 +8096,6 @@ version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"

-[[package]]
-name = "scrypt"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f"
-dependencies = [
- "pbkdf2",
- "salsa20",
- "sha2 0.10.8",
-]
-
 [[package]]
 name = "sct"
 version = "0.7.1"
@@ -8514,7 +8187,7 @@ dependencies = [

 [[package]]
 name = "sd-core"
-version = "0.2.11"
+version = "0.2.13"
 dependencies = [
  "aovec",
  "async-channel",
@@ -8549,7 +8222,6 @@ dependencies = [
  "normpath",
  "notify",
  "once_cell",
- "opendal",
  "openssl",
  "openssl-sys",
  "pin-project-lite",
@@ -8574,7 +8246,6 @@ dependencies = [
  "sd-ffmpeg",
  "sd-file-ext",
  "sd-images",
- "sd-indexer",
  "sd-media-metadata",
  "sd-p2p",
  "sd-p2p-block",
@@ -8593,7 +8264,6 @@ dependencies = [
  "static_assertions",
  "strum",
  "strum_macros",
- "sync_wrapper 1.0.1",
  "sysinfo",
  "tar",
  "tempfile",
@@ -8634,6 +8304,7 @@ version = "0.1.0"
 dependencies = [
  "async-channel",
  "async-trait",
+ "blake3",
  "chrono",
  "futures",
  "futures-concurrency",
@@ -8648,6 +8319,7 @@ dependencies = [
  "sd-core-indexer-rules",
  "sd-core-prisma-helpers",
+ "sd-core-sync",
  "sd-file-ext",
  "sd-prisma",
  "sd-sync",
  "sd-task-system",
@@ -8655,6 +8327,7 @@ dependencies = [
  "serde",
  "serde_json",
  "specta",
+ "static_assertions",
  "strum",
  "tempfile",
  "thiserror",
@@ -8763,7 +8436,7 @@ dependencies = [

 [[package]]
 name = "sd-desktop"
-version = "0.2.11"
+version = "0.2.13"
 dependencies = [
  "axum",
  "directories 5.0.1",
@@ -8868,30 +8541,6 @@ dependencies = [
  "tracing",
 ]

-[[package]]
-name = "sd-indexer"
-version = "0.0.1"
-dependencies = [
- "chrono",
- "futures-util",
- "globset",
- "normpath",
- "opendal",
- "rmp-serde",
- "rspc",
- "sd-core-file-path-helper",
- "sd-core-indexer-rules",
- "sd-file-ext",
- "sd-prisma",
- "sd-utils",
- "serde",
- "specta",
- "tempfile",
- "thiserror",
- "tokio",
- "tracing",
-]
-
 [[package]]
 name = "sd-media-metadata"
 version = "0.0.0"
@@ -8960,7 +8609,7 @@ dependencies = [
  "specta",
  "stable-vec",
  "streamunordered",
- "sync_wrapper 0.1.2",
+ "sync_wrapper",
  "thiserror",
  "tokio",
  "tokio-stream",
@@ -9490,7 +9139,6 @@ version = "2.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de"
 dependencies = [
  "digest 0.10.7",
- "rand_core 0.6.4",
 ]

@@ -9500,18 +9148,6 @@ version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"

-[[package]]
-name = "simple_asn1"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085"
-dependencies = [
- "num-bigint",
- "num-traits",
- "thiserror",
- "time",
-]
-
 [[package]]
 name = "simplecss"
 version = "0.2.1"
@@ -9990,15 +9626,6 @@ version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"

-[[package]]
-name = "sync_wrapper"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394"
-dependencies = [
- "futures-core",
-]
-
 [[package]]
 name = "synstructure"
 version = "0.12.6"
@@ -10530,15 +10157,6 @@ dependencies = [
  "time-core",
 ]

-[[package]]
-name = "tiny-keccak"
-version = "2.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
-dependencies = [
- "crunchy",
-]
-
 [[package]]
 name = "tiny-skia"
 version = "0.11.4"
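Nearly all of this lockfile churn is transitive fallout from the OpenDAL revert: once no workspace crate depends on opendal, its private dependency tree (backon, reqsign, rsa, jsonwebtoken, moka, and the crypto stack beneath them) drops out of the graph. To see why any single crate was present before such a revert, Cargo can invert the dependency query:

    # Show which crates (transitively) pull in opendal
    cargo tree -i opendal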
@@ -76,6 +76,7 @@ rmp-serde = "1.1.2"
 rmpv = { version = "^1.0.1", features = ["with-serde"] }
 serde = "1.0"
 serde_json = "1.0"
+static_assertions = "1.1.0"
 strum = "0.25"
 strum_macros = "0.25"
 tempfile = "3.8.1"
@@ -1,6 +1,6 @@
 [package]
 name = "sd-desktop"
-version = "0.2.11"
+version = "0.2.13"
 description = "The universal file manager."
 authors = ["Spacedrive Technology Inc <support@spacedrive.com>"]
 default-run = "sd-desktop"
@@ -7,6 +7,7 @@ use std::{
 	collections::HashMap,
 	fs,
 	path::PathBuf,
+	process::Command,
 	sync::{Arc, Mutex, PoisonError},
 	time::Duration,
 };
@@ -149,6 +150,47 @@ async fn open_logs_dir(node: tauri::State<'_, Arc<Node>>) -> Result<(), ()> {
 	})
 }

+#[tauri::command(async)]
+#[specta::specta]
+async fn open_trash_in_os_explorer() -> Result<(), ()> {
+	#[cfg(target_os = "macos")]
+	{
+		let full_path = format!("{}/.Trash/", std::env::var("HOME").unwrap());
+
+		Command::new("open")
+			.arg(full_path)
+			.spawn()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?
+			.wait()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?;
+
+		Ok(())
+	}
+
+	#[cfg(target_os = "windows")]
+	{
+		Command::new("explorer")
+			.arg("shell:RecycleBinFolder")
+			.spawn()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?
+			.wait()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?;
+		return Ok(());
+	}
+
+	#[cfg(target_os = "linux")]
+	{
+		Command::new("xdg-open")
+			.arg("~/.local/share/Trash/")
+			.spawn()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?
+			.wait()
+			.map_err(|err| error!("Error opening trash: {err:#?}"))?;
+
+		Ok(())
+	}
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize, specta::Type, tauri_specta::Event)]
 #[serde(tag = "type")]
 pub enum DragAndDropEvent {
|
|||
reload_webview,
|
||||
set_menu_bar_item_state,
|
||||
request_fda_macos,
|
||||
open_trash_in_os_explorer,
|
||||
file::open_file_paths,
|
||||
file::open_ephemeral_files,
|
||||
file::get_file_path_open_with_apps,
|
||||
|
|
|
@@ -12,6 +12,8 @@
 	"macOSPrivateApi": true,
 	"bundle": {
 		"active": true,
+		"publisher": "Spacedrive Technology Inc.",
+		"category": "Productivity",
 		"targets": ["deb", "msi", "dmg", "updater"],
 		"identifier": "com.spacedrive.desktop",
 		"icon": [
@@ -24,7 +26,7 @@
 		"resources": {},
 		"externalBin": [],
 		"copyright": "Spacedrive Technology Inc.",
-		"shortDescription": "File explorer from the future.",
+		"shortDescription": "Spacedrive",
 		"longDescription": "Cross-platform universal file explorer, powered by an open-source virtual distributed filesystem.",
 		"deb": {
 			"files": {
@@ -41,6 +41,17 @@ export const commands = {
 	async requestFdaMacos(): Promise<null> {
 		return await TAURI_INVOKE('plugin:tauri-specta|request_fda_macos');
 	},
+	async openTrashInOsExplorer(): Promise<__Result__<null, null>> {
+		try {
+			return {
+				status: 'ok',
+				data: await TAURI_INVOKE('plugin:tauri-specta|open_trash_in_os_explorer')
+			};
+		} catch (e) {
+			if (e instanceof Error) throw e;
+			else return { status: 'error', error: e as any };
+		}
+	},
 	async openFilePaths(
 		library: string,
 		ids: number[]
@@ -1,8 +1,8 @@
 import { useNavigation } from '@react-navigation/native';
-import { useCache, useLibraryQuery, useNodes } from '@sd/client';
 import { DotsThreeOutline, Plus } from 'phosphor-react-native';
 import { useRef } from 'react';
 import { Text, View } from 'react-native';
+import { useCache, useLibraryQuery, useNodes } from '@sd/client';
 import { ModalRef } from '~/components/layout/Modal';
 import { tw } from '~/lib/tailwind';
 import { BrowseStackScreenProps } from '~/navigation/tabs/BrowseStack';

@@ -1,8 +1,8 @@
 import { useNavigation } from '@react-navigation/native';
-import { useCache, useLibraryQuery, useNodes } from '@sd/client';
 import { DotsThreeOutline, Plus } from 'phosphor-react-native';
 import React, { useRef } from 'react';
 import { Text, View } from 'react-native';
+import { useCache, useLibraryQuery, useNodes } from '@sd/client';
 import { ModalRef } from '~/components/layout/Modal';
 import { tw } from '~/lib/tailwind';
 import { BrowseStackScreenProps } from '~/navigation/tabs/BrowseStack';
apps/mobile/src/components/header/DynamicHeader.tsx (110 added lines, new file)

@@ -0,0 +1,110 @@
+import { DrawerNavigationHelpers } from '@react-navigation/drawer/lib/typescript/src/types';
+import { RouteProp, useNavigation } from '@react-navigation/native';
+import { NativeStackHeaderProps } from '@react-navigation/native-stack';
+import { ArrowLeft, DotsThreeOutline, MagnifyingGlass } from 'phosphor-react-native';
+import { Platform, Pressable, Text, View } from 'react-native';
+import { useSafeAreaInsets } from 'react-native-safe-area-context';
+import { tw, twStyle } from '~/lib/tailwind';
+import { getExplorerStore, useExplorerStore } from '~/stores/explorerStore';
+import { Icon } from '../icons/Icon';
+
+type Props = {
+	headerRoute?: NativeStackHeaderProps; // supporting title from the options object of navigation
+	optionsRoute?: RouteProp<any, any>; // supporting params passed
+	kind: 'tag' | 'location'; // the kind of icon to display
+	explorerMenu?: boolean; // whether to show the explorer menu
+};
+
+export default function DynamicHeader({
+	headerRoute,
+	optionsRoute,
+	kind,
+	explorerMenu = true
+}: Props) {
+	const navigation = useNavigation<DrawerNavigationHelpers>();
+	const headerHeight = useSafeAreaInsets().top;
+	const isAndroid = Platform.OS === 'android';
+	const explorerStore = useExplorerStore();
+
+	return (
+		<View
+			style={twStyle('relative h-auto w-full border-b border-app-cardborder bg-app-header', {
+				paddingTop: headerHeight + (isAndroid ? 15 : 0)
+			})}
+		>
+			<View style={tw`mx-auto h-auto w-full justify-center px-5 pb-3`}>
+				<View style={tw`w-full flex-row items-center justify-between`}>
+					<View style={tw`flex-row items-center gap-3`}>
+						<Pressable hitSlop={24} onPress={() => navigation.goBack()}>
+							<ArrowLeft size={23} color={tw.color('ink')} />
+						</Pressable>
+						<View style={tw`flex-row items-center gap-1.5`}>
+							<HeaderIconKind routeParams={optionsRoute?.params} kind={kind} />
+							<Text
+								numberOfLines={1}
+								style={tw`max-w-[200px] text-xl font-bold text-white`}
+							>
+								{headerRoute?.options.title}
+							</Text>
+						</View>
+					</View>
+					<View style={tw`flex-row gap-3`}>
+						{explorerMenu && (
+							<Pressable
+								hitSlop={12}
+								onPress={() => {
+									getExplorerStore().toggleMenu = !explorerStore.toggleMenu;
+								}}
+							>
+								<DotsThreeOutline
+									size={24}
+									color={tw.color(
+										explorerStore.toggleMenu ? 'text-accent' : 'text-zinc-300'
+									)}
+								/>
+							</Pressable>
+						)}
+						<Pressable
+							hitSlop={12}
+							onPress={() => {
+								navigation.navigate('SearchStack', {
+									screen: 'Search'
+								});
+							}}
+						>
+							<MagnifyingGlass
+								size={24}
+								weight="bold"
+								color={tw.color('text-zinc-300')}
+							/>
+						</Pressable>
+					</View>
+				</View>
+			</View>
+		</View>
+	);
+}
+
+interface HeaderIconKindProps {
+	routeParams?: any;
+	kind: Props['kind'];
+}
+
+const HeaderIconKind = ({ routeParams, kind }: HeaderIconKindProps) => {
+	switch (kind) {
+		case 'location':
+			return <Icon size={30} name="Folder" />;
+		case 'tag':
+			return (
+				<View
+					style={twStyle('h-[24px] w-[24px] rounded-full', {
+						backgroundColor: routeParams.color
+					})}
+				/>
+			);
+		default:
+			return null;
+	}
+};
@@ -1,48 +1,25 @@
 import { DrawerNavigationHelpers } from '@react-navigation/drawer/lib/typescript/src/types';
-import { useNavigation } from '@react-navigation/native';
-import { NativeStackHeaderProps } from '@react-navigation/native-stack';
-import { ArrowLeft, DotsThreeOutline, List, MagnifyingGlass } from 'phosphor-react-native';
+import { RouteProp, useNavigation } from '@react-navigation/native';
+import { ArrowLeft, List, MagnifyingGlass } from 'phosphor-react-native';
 import { Platform, Pressable, Text, View } from 'react-native';
 import { useSafeAreaInsets } from 'react-native-safe-area-context';
 import { tw, twStyle } from '~/lib/tailwind';
-import { getExplorerStore, useExplorerStore } from '~/stores/explorerStore';
-
-import { Icon } from '../icons/Icon';
 import Search from '../search/Search';

-type HeaderProps = {
-	title?: string; //title of the page
-	showSearch?: boolean; //show the search button
-	showDrawer?: boolean; //show the drawer button
-	searchType?: 'explorer' | 'location' | 'categories'; //Temporary
-	navBack?: boolean; //navigate back to the previous screen
-	headerKind?: 'default' | 'location' | 'tag'; //kind of header
-	route?: never;
-	routeTitle?: never;
+type Props = {
+	route?: RouteProp<any, any>; // supporting title from the options object of navigation
+	navBack?: boolean; // whether to show the back icon
+	search?: boolean; // whether to show the search icon
+	title?: string; // in some cases - we want to override the route title
 };

-//you can pass in a routeTitle only if route is passed in
-type Props =
-	| HeaderProps
-	| ({
-			route: NativeStackHeaderProps;
-			routeTitle?: boolean;
-	  } & Omit<HeaderProps, 'route' | 'routeTitle'>);
-
 // Default header with search bar and button to open drawer
 export default function Header({
-	title,
-	searchType,
-	navBack,
 	route,
-	routeTitle,
-	headerKind = 'default',
-	showDrawer = false,
-	showSearch = true
+	navBack,
+	title,
+	search = false
 }: Props) {
 	const navigation = useNavigation<DrawerNavigationHelpers>();
-	const explorerStore = useExplorerStore();
-	const routeParams = route?.route.params as any;
 	const headerHeight = useSafeAreaInsets().top;
 	const isAndroid = Platform.OS === 'android';
@@ -52,38 +29,25 @@ export default function Header({
 				paddingTop: headerHeight + (isAndroid ? 15 : 0)
 			})}
 		>
-			<View style={tw`mx-auto h-auto w-full justify-center px-5 pb-4`}>
+			<View style={tw`mx-auto h-auto w-full justify-center px-5 pb-3`}>
 				<View style={tw`w-full flex-row items-center justify-between`}>
 					<View style={tw`flex-row items-center gap-3`}>
-						{navBack && (
-							<Pressable
-								hitSlop={24}
-								onPress={() => {
-									navigation.goBack();
-								}}
-							>
-								<ArrowLeft size={23} color={tw.color('ink')} />
-							</Pressable>
-						)}
-						<View style={tw`flex-row items-center gap-2`}>
-							<HeaderIconKind headerKind={headerKind} routeParams={routeParams} />
-							{showDrawer && (
-								<Pressable onPress={() => navigation.openDrawer()}>
-									<List size={24} color={tw.color('text-zinc-300')} />
-								</Pressable>
-							)}
-							<Text
-								numberOfLines={1}
-								style={tw`max-w-[200px] text-xl font-bold text-white`}
-							>
-								{title || (routeTitle && route?.options.title)}
-							</Text>
-						</View>
+						{navBack ? (
+							<Pressable
+								hitSlop={24}
+								onPress={() => navigation.goBack()}
+							>
+								<ArrowLeft size={24} color={tw.color('ink')} />
+							</Pressable>
+						) : (
+							<Pressable onPress={() => navigation.openDrawer()}>
+								<List size={24} color={tw.color('ink')} />
+							</Pressable>
+						)}
+						<Text style={tw`text-xl font-bold text-ink`}>{title || route?.name}</Text>
 					</View>
 					<View style={tw`relative flex-row items-center gap-3`}>
-						{showSearch && (
-							<View style={tw`flex-row items-center gap-2`}>
-								<Pressable
+						{search && <Pressable
 							hitSlop={24}
 							onPress={() => {
 								navigation.navigate('SearchStack', {
@@ -96,67 +60,9 @@ export default function Header({
 									weight="bold"
 									color={tw.color('text-zinc-300')}
 								/>
-							</Pressable>
-						</View>
-					)}
-					{(headerKind === 'location' || headerKind === 'tag') && (
-						<Pressable
-							hitSlop={24}
-							onPress={() => {
-								getExplorerStore().toggleMenu = !explorerStore.toggleMenu;
-							}}
-						>
-							<DotsThreeOutline
-								size={24}
-								color={tw.color(
-									explorerStore.toggleMenu ? 'text-accent' : 'text-zinc-300'
-								)}
-							/>
-						</Pressable>
-					)}
+						</Pressable>}
 					</View>
 				</View>
-				{searchType && <HeaderSearchType searchType={searchType} />}
 			</View>
 		</View>
 	);
 }

-interface HeaderSearchTypeProps {
-	searchType: HeaderProps['searchType'];
-}
-
-const HeaderSearchType = ({ searchType }: HeaderSearchTypeProps) => {
-	switch (searchType) {
-		case 'explorer':
-			return 'Explorer'; //TODO
-		case 'location':
-			return <Search placeholder="Location name..." />;
-		case 'categories':
-			return <Search placeholder="Category name..." />;
-		default:
-			return null;
-	}
-};
-
-interface HeaderIconKindProps {
-	headerKind: HeaderProps['headerKind'];
-	routeParams?: any;
-}
-
-const HeaderIconKind = ({ headerKind, routeParams }: HeaderIconKindProps) => {
-	switch (headerKind) {
-		case 'location':
-			return <Icon size={30} name="Folder" />;
-		case 'tag':
-			return (
-				<View
-					style={twStyle('h-[30px] w-[30px] rounded-full', {
-						backgroundColor: routeParams.color
-					})}
-				/>
-			);
-		default:
-			return null;
-	}
-};
apps/mobile/src/components/header/SearchHeader.tsx (53 added lines, new file)

@@ -0,0 +1,53 @@
+import { DrawerNavigationHelpers } from '@react-navigation/drawer/lib/typescript/src/types';
+import { RouteProp, useNavigation } from '@react-navigation/native';
+import { ArrowLeft } from 'phosphor-react-native';
+import { Platform, Pressable, Text, View } from 'react-native';
+import { useSafeAreaInsets } from 'react-native-safe-area-context';
+import { tw, twStyle } from '~/lib/tailwind';
+import Search from '../search/Search';
+
+const searchPlaceholder = {
+	locations: 'Search location name...',
+	tags: 'Search tag name...',
+	categories: 'Search category name...'
+};
+
+type Props = {
+	route?: RouteProp<any, any>; // supporting title from the options object of navigation
+	kind: keyof typeof searchPlaceholder; // the kind of search we are doing
+	title?: string; // in some cases - we want to override the route title
+};
+
+export default function SearchHeader({ route, kind, title }: Props) {
+	const navigation = useNavigation<DrawerNavigationHelpers>();
+	const headerHeight = useSafeAreaInsets().top;
+	const isAndroid = Platform.OS === 'android';
+
+	return (
+		<View
+			style={twStyle('relative h-auto w-full border-b border-app-cardborder bg-app-header', {
+				paddingTop: headerHeight + (isAndroid ? 15 : 0)
+			})}
+		>
+			<View style={tw`mx-auto h-auto w-full justify-center px-5 pb-3`}>
+				<View style={tw`w-full flex-row items-center justify-between`}>
+					<View style={tw`flex-row items-center gap-3`}>
+						<Pressable
+							hitSlop={24}
+							onPress={() => navigation.goBack()}
+						>
+							<ArrowLeft size={24} color={tw.color('ink')} />
+						</Pressable>
+						<Text style={tw`text-xl font-bold text-ink`}>{title || route?.name}</Text>
+					</View>
+				</View>
+				<Search placeholder={searchPlaceholder[kind]} />
+			</View>
+		</View>
+	);
+}
@@ -1,9 +1,7 @@
-import { useRoute } from '@react-navigation/native';
 import { DimensionValue, Platform } from 'react-native';
 import LinearGradient from 'react-native-linear-gradient';
 import { ClassInput } from 'twrnc';
 import { tw, twStyle } from '~/lib/tailwind';
-import { useExplorerStore } from '~/stores/explorerStore';

 interface Props {
 	children: React.ReactNode; // children of fade
@@ -13,7 +11,6 @@ interface Props {
 	orientation?: 'horizontal' | 'vertical'; // orientation of fade
 	fadeSides?: 'left-right' | 'top-bottom'; // which sides to fade
 	screenFade?: boolean; // if true, the fade will consider the bottom tab bar height
-	noConditions?: boolean; // if true, the fade will be rendered as is
 	bottomFadeStyle?: ClassInput; // tailwind style for bottom fade
 	topFadeStyle?: ClassInput; // tailwind style for top fade
 }
@@ -25,20 +22,15 @@ const Fade = ({
 	height,
 	bottomFadeStyle,
 	topFadeStyle,
-	noConditions = false,
 	screenFade = false,
 	fadeSides = 'left-right',
 	orientation = 'horizontal'
 }: Props) => {
-	const route = useRoute();
-	const { toggleMenu } = useExplorerStore();
 	const bottomTabBarHeight = Platform.OS === 'ios' ? 80 : 60;
 	const gradientStartEndMap = {
 		'left-right': { start: { x: 0, y: 0 }, end: { x: 1, y: 0 } },
 		'top-bottom': { start: { x: 0, y: 1 }, end: { x: 0, y: 0 } }
 	};
-	const menuHeight = 57; // height of the explorer menu
-	const routesWithMenu = ['Location', 'Search', 'Tag']; // routes that are associated with the explorer
 	return (
 		<>
 			<LinearGradient
@@ -46,10 +38,7 @@ const Fade = ({
 				width: orientation === 'vertical' ? height : width,
 				height: orientation === 'vertical' ? width : height,
 				position: 'absolute',
-				top:
-					!noConditions && toggleMenu && routesWithMenu.includes(route.name)
-						? menuHeight
-						: 0,
+				top: 0,
 				alignSelf: 'center',
 				left: fadeSides === 'left-right' ? 0 : undefined,
 				transform: fadeSides === 'left-right' ? undefined : [{ rotate: '180deg' }],
@@ -3,8 +3,6 @@ import { Platform, ScrollView, View } from 'react-native';
 import { ClassInput } from 'twrnc/dist/esm/types';
 import { tw, twStyle } from '~/lib/tailwind';

-import Fade from './Fade';
-
 interface Props {
 	children: ReactNode;
 	/** If true, the container will be a ScrollView */
@@ -13,16 +11,11 @@ interface Props {
 	/** If true, the bottom tab bar height will be added to the bottom of the container */
 	tabHeight?: boolean;
 	scrollToBottomOnChange?: boolean;
-	/** Styling of both side fades */
-	topFadeStyle?: string;
-	bottomFadeStyle?: string;
 }

 const ScreenContainer = ({
 	children,
 	style,
-	topFadeStyle,
-	bottomFadeStyle,
 	scrollview = true,
 	tabHeight = true,
 	scrollToBottomOnChange = false
@@ -31,16 +24,6 @@ const ScreenContainer = ({
 	const bottomTabBarHeight = Platform.OS === 'ios' ? 80 : 60;
 	return scrollview ? (
 		<View style={tw`relative flex-1`}>
-			<Fade
-				topFadeStyle={topFadeStyle}
-				bottomFadeStyle={bottomFadeStyle}
-				screenFade
-				fadeSides="top-bottom"
-				orientation="vertical"
-				color="black"
-				width={30}
-				height="100%"
-			>
 			<ScrollView
 				ref={ref}
 				onContentSizeChange={() => {
@@ -55,20 +38,9 @@ const ScreenContainer = ({
 			>
 				{children}
 			</ScrollView>
-			</Fade>
 		</View>
 	) : (
 		<View style={tw`relative flex-1`}>
-			<Fade
-				topFadeStyle={topFadeStyle}
-				bottomFadeStyle={bottomFadeStyle}
-				screenFade
-				fadeSides="top-bottom"
-				orientation="vertical"
-				color="black"
-				width={30}
-				height="100%"
-			>
 			<View
 				style={twStyle(
 					'flex-1 justify-between gap-10 bg-black py-6',
@@ -78,7 +50,6 @@ const ScreenContainer = ({
 			>
 				{children}
 			</View>
-			</Fade>
 		</View>
 	);
 };
@@ -28,7 +28,7 @@ const GridLocation: React.FC<GridLocationProps> = ({ location, modalRef }: GridL
				)}
			/>
		</View>
		<Pressable hitSlop={24} onPress={() => modalRef.current?.present()}>
		<Pressable onPress={() => modalRef.current?.present()}>
			<DotsThreeOutlineVertical
				weight="fill"
				size={20}

@@ -26,7 +26,6 @@ const Locations = () => {
		<>
			<OverviewSection title="Locations" count={locations?.length}>
				<View style={tw`flex-row items-center`}>
					<Fade height={'100%'} width={30} color="black">
						<FlatList
							horizontal
							data={locations}
@@ -77,7 +76,6 @@ const Locations = () => {
							</Pressable>
						)}
					/>
					</Fade>
				</View>
			</OverviewSection>
			<ImportModal ref={modalRef} />

@@ -18,7 +18,7 @@ export default function Search({ placeholder }: Props) {
	}, [searchStore]);
	return (
		<View
			style={tw`mt-4 flex h-11 w-full flex-row items-center justify-between rounded-md border border-app-inputborder bg-app-input px-3 shadow-sm`}
			style={tw`mt-3 h-10 w-full flex-row items-center justify-between rounded-md border border-app-inputborder bg-app-input px-3 shadow-sm`}
		>
			<TextInput
				onChangeText={(text) => searchStore.setSearch(text)}

@@ -47,7 +47,7 @@ const FiltersBar = () => {
				<Plus weight="bold" size={20} color={tw.color('text-ink-dull')} />
			</Button>
			<View style={tw`relative flex-1`}>
				<Fade noConditions height={'100%'} width={30} color="app-header">
				<Fade height={'100%'} width={30} color="app-header">
					<FlatList
						ref={flatListRef}
						showsHorizontalScrollIndicator={false}

@@ -20,7 +20,7 @@ const GridTag = ({ tag, modalRef }: GridTagProps) => {
					backgroundColor: tag.color!
				})}
			/>
			<Pressable hitSlop={24} onPress={() => modalRef.current?.present()}>
			<Pressable onPress={() => modalRef.current?.present()}>
				<DotsThreeOutlineVertical
					weight="fill"
					size={20}

@@ -21,7 +21,7 @@ export default function SearchStack() {
				component={FiltersScreen}
				options={{
					header: () => {
						return <Header navBack showSearch={false} title="Search filters" />;
						return <Header navBack title="Search filters" />;
					}
				}}
			/>

@@ -8,6 +8,8 @@ import LocationsScreen from '~/screens/browse/Locations';
import TagScreen from '~/screens/browse/Tag';
import TagsScreen from '~/screens/browse/Tags';

import DynamicHeader from '~/components/header/DynamicHeader';
import SearchHeader from '~/components/header/SearchHeader';
import { TabScreenProps } from '../TabNavigator';

const Stack = createNativeStackNavigator<BrowseStackParamList>();
@@ -18,44 +20,44 @@ export default function BrowseStack() {
			<Stack.Screen
				name="Browse"
				component={BrowseScreen}
				options={{ header: () => <Header showDrawer title="Browse" /> }}
				options={({route}) => ({
					header: () => <Header search route={route} />
				})}
			/>
			<Stack.Screen
				name="Location"
				component={LocationScreen}
				options={{
					header: (route) => (
						<Header route={route} headerKind="location" routeTitle navBack />
					)
				}}
				options={({route: optionsRoute}) => ({
					header: (route) => <DynamicHeader optionsRoute={optionsRoute} headerRoute={route} kind="location" />
				})}
			/>
			<Stack.Screen
				name="Tags"
				component={TagsScreen}
				options={{
					header: () => <Header navBack title="Tags" />
				}}
				options={({route}) => ({
					header: () => <SearchHeader kind="tags" route={route} />
				})}
			/>
			<Stack.Screen
				name="Locations"
				component={LocationsScreen}
				options={{
					header: () => <Header navBack searchType="location" title="Locations" />
				}}
				options={({route}) => ({
					header: () => <SearchHeader kind="locations" route={route} />
				})}
			/>
			<Stack.Screen
				name="Tag"
				component={TagScreen}
				options={{
					header: (route) => <Header navBack routeTitle route={route} headerKind="tag" />
				}}
				options={({route: optionsRoute}) => ({
					header: (route) => <DynamicHeader optionsRoute={optionsRoute} headerRoute={route} kind="tag" />
				})}
			/>
			<Stack.Screen
				name="Library"
				component={LibraryScreen}
				options={{
					header: () => <Header navBack title="Library" />
				}}
				options={({route}) => ({
					header: () => <Header navBack route={route} />
				})}
			/>
		</Stack.Navigator>
	);

@@ -13,7 +13,9 @@ export default function NetworkStack() {
		<Stack.Screen
			name="Network"
			component={NetworkScreen}
			options={{ header: () => <Header showDrawer title="Network" /> }}
			options={({route}) => ({
				header: () => <Header search route={route} />
			})}
		/>
	</Stack.Navigator>
	);

@@ -1,9 +1,10 @@
import { CompositeScreenProps } from '@react-navigation/native';
import { createNativeStackNavigator, NativeStackScreenProps } from '@react-navigation/native-stack';
import Header from '~/components/header/Header';
import CategoriesScreen from '~/screens/overview/Categories';
import OverviewScreen from '~/screens/overview/Overview';

import Header from '~/components/header/Header';
import SearchHeader from '~/components/header/SearchHeader';
import { TabScreenProps } from '../TabNavigator';

const Stack = createNativeStackNavigator<OverviewStackParamList>();
@@ -14,14 +15,16 @@ export default function OverviewStack() {
			<Stack.Screen
				name="Overview"
				component={OverviewScreen}
				options={{ header: () => <Header showDrawer title="Overview" /> }}
				options={({route}) => ({
					header: () => <Header search route={route} />
				})}
			/>
			<Stack.Screen
				name="Categories"
				component={CategoriesScreen}
				options={{
					header: () => <Header searchType="categories" navBack title="Categories" />
				}}
				options={({route}) => ({
					header: () => <SearchHeader kind="categories" route={route} />
				})}
			/>
		</Stack.Navigator>
	);

@@ -18,6 +18,7 @@ import NodesSettingsScreen from '~/screens/settings/library/NodesSettings';
import TagsSettingsScreen from '~/screens/settings/library/TagsSettings';
import SettingsScreen from '~/screens/settings/Settings';

import SearchHeader from '~/components/header/SearchHeader';
import { TabScreenProps } from '../TabNavigator';

const Stack = createNativeStackNavigator<SettingsStackParamList>();
@@ -28,7 +29,9 @@ export default function SettingsStack() {
			<Stack.Screen
				name="Settings"
				component={SettingsScreen}
				options={{ header: () => <Header showDrawer title="Settings" /> }}
				options={({route}) => ({
					header: () => <Header search route={route} />
				})}
			/>
			{/* Client */}
			<Stack.Screen
@@ -65,9 +68,9 @@ export default function SettingsStack() {
			<Stack.Screen
				name="LocationSettings"
				component={LocationSettingsScreen}
				options={{
					header: () => <Header searchType="location" navBack title="Locations" />
				}}
				options={() => ({
					header: () => <SearchHeader title="Locations" kind="locations" />
				})}
			/>
			<Stack.Screen
				name="EditLocationSettings"

@@ -1,17 +1,18 @@
import { useNavigation } from '@react-navigation/native';
import { Plus } from 'phosphor-react-native';
import { useRef } from 'react';
import { useMemo, useRef } from 'react';
import { Pressable, View } from 'react-native';
import { FlatList } from 'react-native-gesture-handler';
import { useCache, useLibraryQuery, useNodes } from '@sd/client';
import Empty from '~/components/layout/Empty';
import Fade from '~/components/layout/Fade';
import { ModalRef } from '~/components/layout/Modal';
import ScreenContainer from '~/components/layout/ScreenContainer';
import CreateTagModal from '~/components/modal/tag/CreateTagModal';
import { TagItem } from '~/components/tags/TagItem';
import { tw, twStyle } from '~/lib/tailwind';
import { BrowseStackScreenProps } from '~/navigation/tabs/BrowseStack';
import { useDebounce } from 'use-debounce';
import { useSearchStore } from '~/stores/searchStore';

interface Props {
	viewStyle?: 'grid' | 'list';
@@ -21,9 +22,19 @@ export default function TagsScreen({ viewStyle = 'list' }: Props) {
	const navigation = useNavigation<BrowseStackScreenProps<'Browse'>['navigation']>();
	const modalRef = useRef<ModalRef>(null);

	const {search} = useSearchStore();
	const tags = useLibraryQuery(['tags.list']);
	useNodes(tags.data?.nodes);
	const tagData = useCache(tags.data?.items);
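	// Debounce the raw search input so the filter below re-runs at most once every 200ms while typing.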
	const [debouncedSearch] = useDebounce(search, 200);

	const filteredTags = useMemo(
		() =>
			tagData?.filter((location) =>
				location.name?.toLowerCase().includes(debouncedSearch.toLowerCase())
			) ?? [],
		[debouncedSearch, tagData]
	);

	return (
		<ScreenContainer scrollview={false} style={tw`relative px-6 py-0`}>
@@ -36,15 +47,8 @@ export default function TagsScreen({ viewStyle = 'list' }: Props) {
			>
				<Plus size={20} weight="bold" style={tw`text-ink`} />
			</Pressable>
			<Fade
				fadeSides="top-bottom"
				orientation="vertical"
				color="black"
				width={30}
				height="100%"
			>
				<FlatList
					data={tagData}
					data={filteredTags}
					renderItem={({ item }) => (
						<TagItem
							viewStyle={viewStyle}
@@ -76,7 +80,6 @@ export default function TagsScreen({ viewStyle = 'list' }: Props) {
						tagData.length === 0 && 'h-full items-center justify-center'
					)}
				/>
			</Fade>
			<CreateTagModal ref={modalRef} />
		</ScreenContainer>
	);

@@ -5,7 +5,7 @@ import SaveAdd from '~/components/search/filters/SaveAdd';
const FiltersScreen = () => {
	return (
		<>
			<ScreenContainer bottomFadeStyle="bottom-0" tabHeight={false}>
			<ScreenContainer tabHeight={false}>
				<FiltersList />
			</ScreenContainer>
			<SaveAdd />

@@ -129,7 +129,7 @@ function renderSectionHeader({ section }: { section: { title: string } }) {
		<Text
			style={twStyle(
				'mb-3 text-lg font-bold text-ink',
				section.title === 'Client' ? 'mt-2' : 'mt-5'
				section.title === 'Client' ? 'mt-0' : 'mt-5'
			)}
		>
			{section.title}
@@ -142,24 +142,24 @@ export default function SettingsScreen({ navigation }: SettingsStackScreenProps<

	return (
		<ScreenContainer tabHeight={false} scrollview={false} style={tw`gap-0 px-6 py-0`}>
			<SectionList
				sections={sections(debugState)}
				contentContainerStyle={tw`h-auto pb-5 pt-3`}
				renderItem={({ item }) => (
					<SettingsItem
						title={item.title}
						leftIcon={item.icon}
						onPress={() => navigation.navigate(item.navigateTo as any)}
						rounded={item.rounded}
					/>
				)}
				renderSectionHeader={renderSectionHeader}
				ListFooterComponent={<FooterComponent />}
				showsVerticalScrollIndicator={false}
				stickySectionHeadersEnabled={false}
				initialNumToRender={50}
			/>
		</ScreenContainer>
			<SectionList
				contentContainerStyle={tw`py-6`}
				sections={sections(debugState)}
				renderItem={({ item }) => (
					<SettingsItem
						title={item.title}
						leftIcon={item.icon}
						onPress={() => navigation.navigate(item.navigateTo as any)}
						rounded={item.rounded}
					/>
				)}
				renderSectionHeader={renderSectionHeader}
				ListFooterComponent={<FooterComponent />}
				showsVerticalScrollIndicator={false}
				stickySectionHeadersEnabled={false}
				initialNumToRender={50}
			/>
		</ScreenContainer>
	);
}

@@ -4,7 +4,7 @@ use axum::{
	extract::{FromRequestParts, State},
	headers::{authorization::Basic, Authorization},
	http::Request,
	middleware::{self, Next},
	middleware::Next,
	response::{IntoResponse, Response},
	routing::get,
	TypedHeader,
@@ -24,12 +25,13 @@ pub struct AppState {
	auth: HashMap<String, SecStr>,
}

#[allow(unused)]
async fn basic_auth<B>(
	State(state): State<AppState>,
	request: Request<B>,
	next: Next<B>,
) -> Response {
	let request = if state.auth.len() != 0 {
	let request = if !state.auth.is_empty() {
		let (mut parts, body) = request.into_parts();

		let Ok(TypedHeader(Authorization(hdr))) =
@@ -46,7 +47,7 @@ async fn basic_auth<B>(
		if state
			.auth
			.get(hdr.username())
			.and_then(|pass| Some(*pass == SecStr::from(hdr.password())))
			.map(|pass| *pass == SecStr::from(hdr.password()))
			!= Some(true)
		{
			return Response::builder()
@@ -110,7 +111,7 @@ async fn main() {
		.into_iter()
		.enumerate()
		.filter_map(|(i, s)| {
			if s.len() == 0 {
			if s.is_empty() {
				return None;
			}

@@ -133,7 +134,7 @@ async fn main() {
	};

	// We require credentials in production builds (unless explicitly disabled)
	if auth.len() == 0 && !disabled {
	if auth.is_empty() && !disabled {
		#[cfg(not(debug_assertions))]
		{
			warn!("The 'SD_AUTH' environment variable is not set!");
@@ -143,6 +144,7 @@ async fn main() {
		}
	}

	#[cfg(not(feature = "assets"))]
	let state = AppState { auth };

	let (node, router) = match Node::new(
@@ -243,7 +245,7 @@ async fn main() {
	let app = app
		.route("/", get(|| async { "Spacedrive Server!" }))
		.fallback(|| async { "404 Not Found: We're past the event horizon..." })
		.layer(middleware::from_fn_with_state(state, basic_auth));
		.layer(axum::middleware::from_fn_with_state(state, basic_auth));

	let mut addr = "[::]:8080".parse::<SocketAddr>().unwrap(); // This listens on IPv6 and IPv4
	addr.set_port(port);

@@ -1,6 +1,6 @@
[package]
name = "sd-core"
version = "0.2.11"
version = "0.2.13"
description = "Virtual distributed filesystem engine that powers Spacedrive."
authors = ["Spacedrive Technology Inc."]
rust-version = "1.75.0"
@@ -32,15 +32,15 @@ sd-ai = { path = "../crates/ai", optional = true }
sd-cache = { path = "../crates/cache" }
sd-cloud-api = { version = "0.1.0", path = "../crates/cloud-api" }
sd-crypto = { path = "../crates/crypto", features = [
	"sys",
	"tokio",
	"sys",
	"tokio",
], optional = true }
sd-ffmpeg = { path = "../crates/ffmpeg", optional = true }
sd-file-ext = { path = "../crates/file-ext" }
sd-images = { path = "../crates/images", features = [
	"rspc",
	"serde",
	"specta",
	"rspc",
	"serde",
	"specta",
] }
sd-media-metadata = { path = "../crates/media-metadata" }
sd-p2p = { path = "../crates/p2p", features = ["specta"] }
@@ -50,7 +50,6 @@ sd-p2p-tunnel = { path = "../crates/p2p-tunnel" }
sd-prisma = { path = "../crates/prisma" }
sd-sync = { path = "../crates/sync" }
sd-utils = { path = "../crates/utils" }
sd-indexer = { path = "../crates/sd-indexer" }

# Workspace dependencies
async-channel = { workspace = true }
@@ -72,27 +71,28 @@ reqwest = { workspace = true, features = ["json", "native-tls-vendored"] }
rmp-serde = { workspace = true }
rmpv = { workspace = true }
rspc = { workspace = true, features = [
	"axum",
	"uuid",
	"chrono",
	"tracing",
	"alpha",
	"unstable",
	"axum",
	"uuid",
	"chrono",
	"tracing",
	"alpha",
	"unstable",
] }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
specta = { workspace = true }
static_assertions = { workspace = true }
strum = { workspace = true, features = ["derive"] }
strum_macros = { workspace = true }
tempfile = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = [
	"sync",
	"rt-multi-thread",
	"io-util",
	"macros",
	"time",
	"process",
	"sync",
	"rt-multi-thread",
	"io-util",
	"macros",
	"time",
	"process",
] }
tokio-stream = { workspace = true, features = ["fs"] }
tokio-util = { workspace = true, features = ["io"] }
@@ -102,6 +102,7 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] }
uuid = { workspace = true, features = ["v4", "serde"] }
webp = { workspace = true }


# Specific Core dependencies
async-recursion = "1.0.5"
async-stream = "0.3.5"
@@ -121,23 +122,16 @@ int-enum = "0.5.0"
libc = "0.2.153"
mini-moka = "0.10.2"
notify = { git = "https://github.com/notify-rs/notify.git", rev = "c3929ed114fbb0bc7457a9a498260461596b00ca", default-features = false, features = [
	"macos_fsevent",
	"macos_fsevent",
] }
rmp = "0.8.12"
serde-hashkey = "0.4.5"
serde_repr = "0.1"
serde_with = "3.4.0"
slotmap = "1.0.6"
static_assertions = "1.1.0"
sysinfo = "0.29.10"
tar = "0.4.40"
tower-service = "0.3.2"
opendal = { version = "0.45.1", features = [
	"services-gdrive",
	"services-s3",
	"services-fs",
] }
sync_wrapper = { version = "1.0.1", features = ["futures"] }

# Override features of transitive dependencies
[dependencies.openssl]
@@ -160,10 +154,10 @@ trash = "4.1.0"

[target.'cfg(target_os = "ios")'.dependencies]
icrate = { version = "0.1.0", features = [
	"Foundation",
	"Foundation_NSFileManager",
	"Foundation_NSString",
	"Foundation_NSNumber",
	"Foundation",
	"Foundation_NSFileManager",
	"Foundation_NSString",
	"Foundation_NSNumber",
] }

[dev-dependencies]

@@ -100,6 +100,18 @@ impl<'a> IsolatedFilePathData<'a> {
		self.extension.as_ref()
	}

	#[must_use]
	pub fn to_owned(self) -> IsolatedFilePathData<'static> {
		IsolatedFilePathData {
			location_id: self.location_id,
			materialized_path: Cow::Owned(self.materialized_path.to_string()),
			is_dir: self.is_dir,
			name: Cow::Owned(self.name.to_string()),
			extension: Cow::Owned(self.extension.to_string()),
			relative_path: Cow::Owned(self.relative_path.to_string()),
		}
	}

	#[must_use]
	pub const fn is_dir(&self) -> bool {
		self.is_dir

@@ -16,6 +16,7 @@ sd-core-prisma-helpers = { path = "../prisma-helpers" }
sd-core-sync = { path = "../sync" }

# Sub-crates
sd-file-ext = { path = "../../../crates/file-ext" }
sd-prisma = { path = "../../../crates/prisma" }
sd-sync = { path = "../../../crates/sync" }
sd-task-system = { path = "../../../crates/task-system" }
@@ -24,6 +25,7 @@ sd-utils = { path = "../../../crates/utils" }

async-channel = { workspace = true }
async-trait = { workspace = true }
blake3 = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
futures = { workspace = true }
futures-concurrency = { workspace = true }
@@ -37,6 +39,7 @@ rspc = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
specta = { workspace = true }
static_assertions = { workspace = true }
strum = { workspace = true, features = ["derive", "phf"] }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["fs", "sync", "parking_lot"] }

core/crates/heavy-lifting/src/file_identifier/cas_id.rs (new file, 68 lines)
@@ -0,0 +1,68 @@
use std::path::Path;

use blake3::Hasher;
use static_assertions::const_assert;
use tokio::{
	fs::{self, File},
	io::{self, AsyncReadExt, AsyncSeekExt, SeekFrom},
};

const SAMPLE_COUNT: u64 = 4;
const SAMPLE_SIZE: u64 = 1024 * 10;
const HEADER_OR_FOOTER_SIZE: u64 = 1024 * 8;

// minimum file size of 100KiB, to avoid sample hashing for small files as they can be smaller than the total sample size
const MINIMUM_FILE_SIZE: u64 = 1024 * 100;

// Asserting that nobody messed up our consts
const_assert!((HEADER_OR_FOOTER_SIZE * 2 + SAMPLE_COUNT * SAMPLE_SIZE) < MINIMUM_FILE_SIZE);

// Asserting that the sample size is larger than header/footer size, as the same buffer is used for both
const_assert!(SAMPLE_SIZE > HEADER_OR_FOOTER_SIZE);

// SAFETY: Casts here are safe, they're hardcoded values we have some const assertions above to make sure they're correct
#[allow(clippy::cast_possible_truncation)]
#[allow(clippy::cast_possible_wrap)]
pub async fn generate_cas_id(
	path: impl AsRef<Path> + Send,
	size: u64,
) -> Result<String, io::Error> {
	let mut hasher = Hasher::new();
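	// Hash the size first: files whose sampled bytes happen to match still get
	// different cas_ids when their lengths differ.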
	hasher.update(&size.to_le_bytes());

	if size <= MINIMUM_FILE_SIZE {
		// For small files, we hash the whole file
		hasher.update(&fs::read(path).await?);
	} else {
		let mut file = File::open(path).await?;
		let mut buf = vec![0; SAMPLE_SIZE as usize].into_boxed_slice();

		// Hashing the header
		let mut current_pos = file
			.read_exact(&mut buf[..HEADER_OR_FOOTER_SIZE as usize])
			.await? as u64;
		hasher.update(&buf[..HEADER_OR_FOOTER_SIZE as usize]);

		// Sample hashing the inner content of the file
		let seek_jump = (size - HEADER_OR_FOOTER_SIZE * 2) / SAMPLE_COUNT;
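		// `seek_jump` spaces SAMPLE_COUNT samples evenly between header and footer;
		// each loop iteration hashes a full SAMPLE_SIZE buffer, then seeks ahead.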
		loop {
			file.read_exact(&mut buf).await?;
			hasher.update(&buf);

			if current_pos >= (HEADER_OR_FOOTER_SIZE + seek_jump * (SAMPLE_COUNT - 1)) {
				break;
			}

			current_pos = file.seek(SeekFrom::Start(current_pos + seek_jump)).await?;
		}

		// Hashing the footer
		file.seek(SeekFrom::End(-(HEADER_OR_FOOTER_SIZE as i64)))
			.await?;
		file.read_exact(&mut buf[..HEADER_OR_FOOTER_SIZE as usize])
			.await?;
		hasher.update(&buf[..HEADER_OR_FOOTER_SIZE as usize]);
	}

	Ok(hasher.finalize().to_hex()[..16].to_string())
}

core/crates/heavy-lifting/src/file_identifier/job.rs (new file, 566 lines)
@@ -0,0 +1,566 @@
use crate::{
	job_system::{
		job::{Job, JobReturn, JobTaskDispatcher, ReturnStatus},
		report::ReportOutputMetadata,
		utils::cancel_pending_tasks,
		SerializableJob, SerializedTasks,
	},
	utils::sub_path::maybe_get_iso_file_path_from_sub_path,
	Error, JobContext, JobName, LocationScanState, NonCriticalJobError, ProgressUpdate,
};

use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::file_path_for_file_identifier;

use sd_prisma::prisma::{file_path, location, SortOrder};
use sd_task_system::{
	AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
	TaskOutput, TaskStatus,
};
use sd_utils::db::maybe_missing;

use std::{
	collections::HashMap,
	hash::{Hash, Hasher},
	mem,
	path::PathBuf,
	sync::Arc,
	time::Duration,
};

use futures::{stream::FuturesUnordered, StreamExt};
use futures_concurrency::future::TryJoin;
use prisma_client_rust::or;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::time::Instant;
use tracing::warn;

use super::{
	tasks::{
		ExtractFileMetadataTask, ExtractFileMetadataTaskOutput, ObjectProcessorTask,
		ObjectProcessorTaskMetrics,
	},
	FileIdentifierError, CHUNK_SIZE,
};

#[derive(Debug)]
pub struct FileIdentifierJob {
	location: Arc<location::Data>,
	location_path: Arc<PathBuf>,
	sub_path: Option<PathBuf>,

	metadata: Metadata,

	errors: Vec<NonCriticalJobError>,

	pending_tasks_on_resume: Vec<TaskHandle<Error>>,
	tasks_for_shutdown: Vec<Box<dyn Task<Error>>>,
}

impl Hash for FileIdentifierJob {
	fn hash<H: Hasher>(&self, state: &mut H) {
		self.location.id.hash(state);
		if let Some(ref sub_path) = self.sub_path {
			sub_path.hash(state);
		}
	}
}

impl Job for FileIdentifierJob {
	const NAME: JobName = JobName::FileIdentifier;

	async fn resume_tasks(
		&mut self,
		dispatcher: &JobTaskDispatcher,
		ctx: &impl JobContext,
		SerializedTasks(serialized_tasks): SerializedTasks,
	) -> Result<(), Error> {
		self.pending_tasks_on_resume = dispatcher
			.dispatch_many_boxed(
				rmp_serde::from_slice::<Vec<(TaskKind, Vec<u8>)>>(&serialized_tasks)
					.map_err(FileIdentifierError::from)?
					.into_iter()
					.map(|(task_kind, task_bytes)| async move {
						match task_kind {
							TaskKind::ExtractFileMetadata => {
								<ExtractFileMetadataTask as SerializableTask<Error>>::deserialize(
									&task_bytes,
									(),
								)
								.await
								.map(IntoTask::into_task)
							}

							TaskKind::ObjectProcessor => ObjectProcessorTask::deserialize(
								&task_bytes,
								(Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
							)
							.await
							.map(IntoTask::into_task),
						}
					})
					.collect::<Vec<_>>()
					.try_join()
					.await
					.map_err(FileIdentifierError::from)?,
			)
			.await;

		Ok(())
	}

	async fn run(
		mut self,
		dispatcher: JobTaskDispatcher,
		ctx: impl JobContext,
	) -> Result<ReturnStatus, Error> {
		let mut pending_running_tasks = FuturesUnordered::new();

		self.init_or_resume(&mut pending_running_tasks, &ctx, &dispatcher)
			.await?;

		while let Some(task) = pending_running_tasks.next().await {
			match task {
				Ok(TaskStatus::Done((task_id, TaskOutput::Out(out)))) => {
					if let Some(new_object_processor_task) = self
						.process_task_output(task_id, out, &ctx, &dispatcher)
						.await
					{
						pending_running_tasks.push(new_object_processor_task);
					};
				}

				Ok(TaskStatus::Done((task_id, TaskOutput::Empty))) => {
					warn!("Task <id='{task_id}'> returned an empty output");
				}

				Ok(TaskStatus::Shutdown(task)) => {
					self.tasks_for_shutdown.push(task);
				}

				Ok(TaskStatus::Error(e)) => {
					cancel_pending_tasks(&pending_running_tasks).await;

					return Err(e);
				}

				Ok(TaskStatus::Canceled | TaskStatus::ForcedAbortion) => {
					cancel_pending_tasks(&pending_running_tasks).await;

					return Ok(ReturnStatus::Canceled);
				}

				Err(e) => {
					cancel_pending_tasks(&pending_running_tasks).await;

					return Err(e.into());
				}
			}
		}

		if !self.tasks_for_shutdown.is_empty() {
			return Ok(ReturnStatus::Shutdown(self.serialize().await));
		}

		// From this point onward, we are done with the job and it can't be interrupted anymore
		let Self {
			location,
			metadata,
			errors,
			..
		} = self;

		ctx.db()
			.location()
			.update(
				location::id::equals(location.id),
				vec![location::scan_state::set(
					LocationScanState::FilesIdentified as i32,
				)],
			)
			.exec()
			.await
			.map_err(FileIdentifierError::from)?;

		Ok(ReturnStatus::Completed(
			JobReturn::builder()
				.with_metadata(metadata)
				.with_non_critical_errors(errors)
				.build(),
		))
	}
}

impl FileIdentifierJob {
	pub fn new(
		location: location::Data,
		sub_path: Option<PathBuf>,
	) -> Result<Self, FileIdentifierError> {
		Ok(Self {
			location_path: maybe_missing(&location.path, "location.path")
				.map(PathBuf::from)
				.map(Arc::new)?,
			location: Arc::new(location),
			sub_path,
			metadata: Metadata::default(),
			errors: Vec::new(),
			pending_tasks_on_resume: Vec::new(),
			tasks_for_shutdown: Vec::new(),
		})
	}

	async fn init_or_resume(
		&mut self,
		pending_running_tasks: &mut FuturesUnordered<TaskHandle<Error>>,
		job_ctx: &impl JobContext,
		dispatcher: &JobTaskDispatcher,
	) -> Result<(), FileIdentifierError> {
		// if we don't have any pending task, then this is a fresh job
		if self.pending_tasks_on_resume.is_empty() {
			let db = job_ctx.db();
			let maybe_sub_iso_file_path = maybe_get_iso_file_path_from_sub_path(
				self.location.id,
				&self.sub_path,
				&*self.location_path,
				db,
			)
			.await?;

			let mut orphans_count = 0;
			let mut last_orphan_file_path_id = None;

			let start = Instant::now();

			loop {
				#[allow(clippy::cast_possible_wrap)]
				// SAFETY: we know that CHUNK_SIZE is a valid i64
				let orphan_paths = db
					.file_path()
					.find_many(orphan_path_filters(
						self.location.id,
						last_orphan_file_path_id,
						&maybe_sub_iso_file_path,
					))
					.order_by(file_path::id::order(SortOrder::Asc))
					.take(CHUNK_SIZE as i64)
					.select(file_path_for_file_identifier::select())
					.exec()
					.await?;

				if orphan_paths.is_empty() {
					break;
				}

				orphans_count += orphan_paths.len() as u64;
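				// Keep the last row's id as a cursor; the next `find_many` above resumes
				// from it via the `file_path::id::gte` filter in `orphan_path_filters`.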
				last_orphan_file_path_id =
					Some(orphan_paths.last().expect("orphan_paths is not empty").id);

				job_ctx.progress(vec![
					ProgressUpdate::TaskCount(orphans_count),
					ProgressUpdate::Message(format!("{orphans_count} files to be identified")),
				]);

				pending_running_tasks.push(
					dispatcher
						.dispatch(ExtractFileMetadataTask::new_deep(
							Arc::clone(&self.location),
							Arc::clone(&self.location_path),
							orphan_paths,
						))
						.await,
				);
			}

			self.metadata.seeking_orphans_time = start.elapsed();
			self.metadata.total_found_orphans = orphans_count;
		} else {
			pending_running_tasks.extend(mem::take(&mut self.pending_tasks_on_resume));
		}

		Ok(())
	}

	/// Process output of tasks, according to the downcasted output type
	///
	/// # Panics
	/// Will panic if another task type is added in the job, but this function wasn't updated to handle it
	///
	async fn process_task_output(
		&mut self,
		task_id: TaskId,
		any_task_output: Box<dyn AnyTaskOutput>,
		job_ctx: &impl JobContext,
		dispatcher: &JobTaskDispatcher,
	) -> Option<TaskHandle<Error>> {
		if any_task_output.is::<ExtractFileMetadataTaskOutput>() {
			return self
				.process_extract_file_metadata_output(
					*any_task_output
						.downcast::<ExtractFileMetadataTaskOutput>()
						.expect("just checked"),
					job_ctx,
					dispatcher,
				)
				.await;
		} else if any_task_output.is::<ObjectProcessorTaskMetrics>() {
			self.process_object_processor_output(
				*any_task_output
					.downcast::<ObjectProcessorTaskMetrics>()
					.expect("just checked"),
				job_ctx,
			);
		} else {
			unreachable!("Unexpected task output type: <id='{task_id}'>");
		}

		None
	}

	async fn process_extract_file_metadata_output(
		&mut self,
		ExtractFileMetadataTaskOutput {
			identified_files,
			extract_metadata_time,
			errors,
		}: ExtractFileMetadataTaskOutput,
		job_ctx: &impl JobContext,
		dispatcher: &JobTaskDispatcher,
	) -> Option<TaskHandle<Error>> {
		self.metadata.extract_metadata_time += extract_metadata_time;
		self.errors.extend(errors);

		if identified_files.is_empty() {
			self.metadata.completed_tasks += 1;

			job_ctx.progress(vec![ProgressUpdate::CompletedTaskCount(
				self.metadata.completed_tasks,
			)]);

			None
		} else {
			job_ctx.progress_msg(format!("Identified {} files", identified_files.len()));

			Some(
				dispatcher
					.dispatch(ObjectProcessorTask::new_deep(
						identified_files,
						Arc::clone(job_ctx.db()),
						Arc::clone(job_ctx.sync()),
					))
					.await,
			)
		}
	}

	fn process_object_processor_output(
		&mut self,
		ObjectProcessorTaskMetrics {
			assign_cas_ids_time,
			fetch_existing_objects_time,
			assign_to_existing_object_time,
			create_object_time,
			created_objects_count,
			linked_objects_count,
		}: ObjectProcessorTaskMetrics,
		job_ctx: &impl JobContext,
	) {
		self.metadata.assign_cas_ids_time += assign_cas_ids_time;
		self.metadata.fetch_existing_objects_time += fetch_existing_objects_time;
		self.metadata.assign_to_existing_object_time += assign_to_existing_object_time;
		self.metadata.create_object_time += create_object_time;
		self.metadata.created_objects_count += created_objects_count;
		self.metadata.linked_objects_count += linked_objects_count;

		self.metadata.completed_tasks += 1;

		job_ctx.progress(vec![
			ProgressUpdate::CompletedTaskCount(self.metadata.completed_tasks),
			ProgressUpdate::Message(format!(
				"Processed {} of {} objects",
				self.metadata.created_objects_count + self.metadata.linked_objects_count,
				self.metadata.total_found_orphans
			)),
		]);
	}
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
enum TaskKind {
	ExtractFileMetadata,
	ObjectProcessor,
}

#[derive(Serialize, Deserialize)]
struct SaveState {
	location: Arc<location::Data>,
	location_path: Arc<PathBuf>,
	sub_path: Option<PathBuf>,

	metadata: Metadata,

	errors: Vec<NonCriticalJobError>,

	tasks_for_shutdown_bytes: Option<SerializedTasks>,
}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct Metadata {
	extract_metadata_time: Duration,
	assign_cas_ids_time: Duration,
	fetch_existing_objects_time: Duration,
	assign_to_existing_object_time: Duration,
	create_object_time: Duration,
	seeking_orphans_time: Duration,
	total_found_orphans: u64,
	created_objects_count: u64,
	linked_objects_count: u64,
	completed_tasks: u64,
}

impl From<Metadata> for ReportOutputMetadata {
	fn from(value: Metadata) -> Self {
		Self::Metrics(HashMap::from([
			(
				"extract_metadata_time".into(),
				json!(value.extract_metadata_time),
			),
			(
				"assign_cas_ids_time".into(),
				json!(value.assign_cas_ids_time),
			),
			(
				"fetch_existing_objects_time".into(),
				json!(value.fetch_existing_objects_time),
			),
			(
				"assign_to_existing_object_time".into(),
				json!(value.assign_to_existing_object_time),
			),
			("create_object_time".into(), json!(value.create_object_time)),
			(
				"seeking_orphans_time".into(),
				json!(value.seeking_orphans_time),
			),
			(
				"total_found_orphans".into(),
				json!(value.total_found_orphans),
			),
			(
				"created_objects_count".into(),
				json!(value.created_objects_count),
			),
			(
				"linked_objects_count".into(),
				json!(value.linked_objects_count),
			),
			("total_tasks".into(), json!(value.completed_tasks)),
		]))
	}
}

impl SerializableJob for FileIdentifierJob {
	async fn serialize(self) -> Result<Option<Vec<u8>>, rmp_serde::encode::Error> {
		let Self {
			location,
			location_path,
			sub_path,
			metadata,
			errors,
			tasks_for_shutdown,
			..
		} = self;

		rmp_serde::to_vec_named(&SaveState {
			location,
			location_path,
			sub_path,
			metadata,
			tasks_for_shutdown_bytes: Some(SerializedTasks(rmp_serde::to_vec_named(
				&tasks_for_shutdown
					.into_iter()
					.map(|task| async move {
						if task.is::<ExtractFileMetadataTask>() {
							SerializableTask::serialize(
								*task
									.downcast::<ExtractFileMetadataTask>()
									.expect("just checked"),
							)
							.await
							.map(|bytes| (TaskKind::ExtractFileMetadata, bytes))
						} else if task.is::<ObjectProcessorTask>() {
							task.downcast::<ObjectProcessorTask>()
								.expect("just checked")
								.serialize()
								.await
								.map(|bytes| (TaskKind::ObjectProcessor, bytes))
						} else {
							unreachable!("Unexpected task type")
						}
					})
					.collect::<Vec<_>>()
					.try_join()
					.await?,
			)?)),
			errors,
		})
		.map(Some)
	}

	async fn deserialize(
		serialized_job: &[u8],
		_: &impl JobContext,
	) -> Result<Option<(Self, Option<SerializedTasks>)>, rmp_serde::decode::Error> {
		let SaveState {
			location,
			location_path,
			sub_path,
			metadata,

			errors,
			tasks_for_shutdown_bytes,
		} = rmp_serde::from_slice::<SaveState>(serialized_job)?;

		Ok(Some((
			Self {
				location,
				location_path,
				sub_path,
				metadata,
				errors,
				pending_tasks_on_resume: Vec::new(),
				tasks_for_shutdown: Vec::new(),
			},
			tasks_for_shutdown_bytes,
		)))
	}
}

fn orphan_path_filters(
	location_id: location::id::Type,
	file_path_id: Option<file_path::id::Type>,
	maybe_sub_iso_file_path: &Option<IsolatedFilePathData<'_>>,
) -> Vec<file_path::WhereParam> {
	sd_utils::chain_optional_iter(
		[
			or!(
				file_path::object_id::equals(None),
				file_path::cas_id::equals(None)
			),
			file_path::is_dir::equals(Some(false)),
			file_path::location_id::equals(Some(location_id)),
			file_path::size_in_bytes_bytes::not(Some(0u64.to_be_bytes().to_vec())),
		],
		[
			// this is a workaround for the cursor not working properly
			file_path_id.map(file_path::id::gte),
			maybe_sub_iso_file_path.as_ref().map(|sub_iso_file_path| {
				file_path::materialized_path::starts_with(
					sub_iso_file_path
						.materialized_path_for_children()
						.expect("sub path iso_file_path must be a directory"),
				)
			}),
		],
	)
}

core/crates/heavy-lifting/src/file_identifier/mod.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
use crate::utils::sub_path::SubPathError;

use sd_core_file_path_helper::{FilePathError, IsolatedFilePathData};

use sd_file_ext::{extensions::Extension, kind::ObjectKind};
use sd_utils::{db::MissingFieldError, error::FileIOError};

use std::{fs::Metadata, path::Path};

use prisma_client_rust::QueryError;
use rspc::ErrorCode;
use serde::{Deserialize, Serialize};
use specta::Type;
use tokio::fs;
use tracing::trace;

mod cas_id;
mod job;
mod shallow;
mod tasks;

use cas_id::generate_cas_id;

pub use job::FileIdentifierJob;
pub use shallow::shallow;

// we break these tasks into chunks of 100 to improve performance
const CHUNK_SIZE: usize = 100;

#[derive(thiserror::Error, Debug)]
pub enum FileIdentifierError {
	#[error("missing field on database: {0}")]
	MissingField(#[from] MissingFieldError),
	#[error("failed to deserialized stored tasks for job resume: {0}")]
	DeserializeTasks(#[from] rmp_serde::decode::Error),
	#[error("database error: {0}")]
	Database(#[from] QueryError),

	#[error(transparent)]
	FilePathError(#[from] FilePathError),
	#[error(transparent)]
	SubPath(#[from] SubPathError),
}

impl From<FileIdentifierError> for rspc::Error {
	fn from(err: FileIdentifierError) -> Self {
		match err {
			FileIdentifierError::SubPath(sub_path_err) => sub_path_err.into(),

			_ => Self::with_cause(ErrorCode::InternalServerError, err.to_string(), err),
		}
	}
}

#[derive(thiserror::Error, Debug, Serialize, Deserialize, Type)]
pub enum NonCriticalFileIdentifierError {
	#[error("failed to extract file metadata: {0}")]
	FailedToExtractFileMetadata(String),
	#[cfg(target_os = "windows")]
	#[error("failed to extract metadata from on-demand file: {0}")]
	FailedToExtractMetadataFromOnDemandFile(String),
	#[error("failed to extract isolated file path data: {0}")]
	FailedToExtractIsolatedFilePathData(String),
}

#[derive(Debug, Clone)]
pub struct FileMetadata {
	pub cas_id: Option<String>,
	pub kind: ObjectKind,
	pub fs_metadata: Metadata,
}

impl FileMetadata {
	/// Fetch metadata from the file system and generate a cas id for the file
	/// if it's not empty.
	///
	/// # Panics
	/// Will panic if the file is a directory.
	pub async fn new(
		location_path: impl AsRef<Path> + Send,
		iso_file_path: &IsolatedFilePathData<'_>,
	) -> Result<Self, FileIOError> {
		let path = location_path.as_ref().join(iso_file_path);

		let fs_metadata = fs::metadata(&path)
			.await
			.map_err(|e| FileIOError::from((&path, e)))?;

		assert!(
			!fs_metadata.is_dir(),
			"We can't generate cas_id for directories"
		);

		// derive Object kind
		let kind = Extension::resolve_conflicting(&path, false)
			.await
			.map_or(ObjectKind::Unknown, Into::into);

		let cas_id = if fs_metadata.len() != 0 {
			generate_cas_id(&path, fs_metadata.len())
				.await
				.map(Some)
				.map_err(|e| FileIOError::from((&path, e)))?
		} else {
			// We can't do shit with empty files
			None
		};

		trace!(
			"Analyzed file: <path='{}', cas_id={cas_id:?}, object_kind={kind}>",
			path.display()
		);

		Ok(Self {
			cas_id,
			kind,
			fs_metadata,
		})
	}
}

core/crates/heavy-lifting/src/file_identifier/shallow.rs (new file, 207 lines)
@@ -0,0 +1,207 @@
use crate::{utils::sub_path::maybe_get_iso_file_path_from_sub_path, Error, NonCriticalJobError};

use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::file_path_for_file_identifier;
use sd_core_sync::Manager as SyncManager;

use sd_prisma::prisma::{file_path, location, PrismaClient, SortOrder};
use sd_task_system::{
	BaseTaskDispatcher, CancelTaskOnDrop, TaskDispatcher, TaskOutput, TaskStatus,
};
use sd_utils::db::maybe_missing;

use std::{
	path::{Path, PathBuf},
	sync::Arc,
};

use futures_concurrency::future::FutureGroup;
use lending_stream::{LendingStream, StreamExt};
use prisma_client_rust::or;
use tracing::{debug, warn};

use super::{
	tasks::{ExtractFileMetadataTask, ExtractFileMetadataTaskOutput, ObjectProcessorTask},
	FileIdentifierError, CHUNK_SIZE,
};

pub async fn shallow(
	location: location::Data,
	sub_path: impl AsRef<Path> + Send,
	dispatcher: BaseTaskDispatcher<Error>,
	db: Arc<PrismaClient>,
	sync: Arc<SyncManager>,
	invalidate_query: impl Fn(&'static str) + Send + Sync,
) -> Result<Vec<NonCriticalJobError>, Error> {
	let sub_path = sub_path.as_ref();

	let location_path = maybe_missing(&location.path, "location.path")
		.map(PathBuf::from)
		.map(Arc::new)
		.map_err(FileIdentifierError::from)?;

	let location = Arc::new(location);

	let sub_iso_file_path =
		maybe_get_iso_file_path_from_sub_path(location.id, &Some(sub_path), &*location_path, &db)
			.await
			.map_err(FileIdentifierError::from)?
			.map_or_else(
				|| {
					IsolatedFilePathData::new(location.id, &*location_path, &*location_path, true)
						.map_err(FileIdentifierError::from)
				},
				Ok,
			)?;

	let mut orphans_count = 0;
	let mut last_orphan_file_path_id = None;

	let mut pending_running_tasks = FutureGroup::new();
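
	// Each task is wrapped in `CancelTaskOnDrop` below, so returning early from this
	// function cancels anything still running instead of leaking tasks.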
	loop {
		#[allow(clippy::cast_possible_wrap)]
		// SAFETY: we know that CHUNK_SIZE is a valid i64
		let orphan_paths = db
			.file_path()
			.find_many(orphan_path_filters(
				location.id,
				last_orphan_file_path_id,
				&sub_iso_file_path,
			))
			.order_by(file_path::id::order(SortOrder::Asc))
			.take(CHUNK_SIZE as i64)
			.select(file_path_for_file_identifier::select())
			.exec()
			.await
			.map_err(FileIdentifierError::from)?;

		let Some(last_orphan) = orphan_paths.last() else {
			// No orphans here!
			break;
		};

		orphans_count += orphan_paths.len() as u64;
		last_orphan_file_path_id = Some(last_orphan.id);

		pending_running_tasks.insert(CancelTaskOnDrop(
			dispatcher
				.dispatch(ExtractFileMetadataTask::new_shallow(
					Arc::clone(&location),
					Arc::clone(&location_path),
					orphan_paths,
				))
				.await,
		));
	}

	if orphans_count == 0 {
		debug!(
			"No orphans found on <location_id={}, sub_path='{}'>",
			location.id,
			sub_path.display()
		);
		return Ok(vec![]);
	}

	let errors = process_tasks(pending_running_tasks, dispatcher, db, sync).await?;

	invalidate_query("search.paths");
	invalidate_query("search.objects");

	Ok(errors)
}

async fn process_tasks(
	pending_running_tasks: FutureGroup<CancelTaskOnDrop<Error>>,
	dispatcher: BaseTaskDispatcher<Error>,
	db: Arc<PrismaClient>,
	sync: Arc<SyncManager>,
) -> Result<Vec<NonCriticalJobError>, Error> {
	let mut pending_running_tasks = pending_running_tasks.lend_mut();

	let mut errors = vec![];

	while let Some((pending_running_tasks, task_result)) = pending_running_tasks.next().await {
		match task_result {
			Ok(TaskStatus::Done((_, TaskOutput::Out(any_task_output)))) => {
				// We only care about ExtractFileMetadataTaskOutput because we need to dispatch further tasks
				// and the ObjectProcessorTask only gives back some metrics not much important for
				// shallow file identifier
				if any_task_output.is::<ExtractFileMetadataTaskOutput>() {
					let ExtractFileMetadataTaskOutput {
						identified_files,
						errors: more_errors,
						..
					} = *any_task_output
						.downcast::<ExtractFileMetadataTaskOutput>()
						.expect("just checked");

					errors.extend(more_errors);

					if !identified_files.is_empty() {
						pending_running_tasks.insert(CancelTaskOnDrop(
							dispatcher
								.dispatch(ObjectProcessorTask::new_shallow(
									identified_files,
									Arc::clone(&db),
									Arc::clone(&sync),
								))
								.await,
						));
					}
				}
			}

			Ok(TaskStatus::Done((task_id, TaskOutput::Empty))) => {
				warn!("Task <id='{task_id}'> returned an empty output");
			}

			Ok(TaskStatus::Shutdown(_)) => {
				debug!(
					"Spacedrive is shutting down while a shallow file identifier was in progress"
				);
				return Ok(vec![]);
			}

			Ok(TaskStatus::Error(e)) => {
				return Err(e);
			}

			Ok(TaskStatus::Canceled | TaskStatus::ForcedAbortion) => {
				warn!("Task was cancelled or aborted on shallow file identifier");
				return Ok(vec![]);
			}

			Err(e) => {
				return Err(e.into());
			}
		}
	}

	Ok(errors)
}

fn orphan_path_filters(
	location_id: location::id::Type,
	file_path_id: Option<file_path::id::Type>,
	sub_iso_file_path: &IsolatedFilePathData<'_>,
) -> Vec<file_path::WhereParam> {
	sd_utils::chain_optional_iter(
		[
			or!(
				file_path::object_id::equals(None),
				file_path::cas_id::equals(None)
			),
			file_path::is_dir::equals(Some(false)),
			file_path::location_id::equals(Some(location_id)),
			file_path::materialized_path::equals(Some(
				sub_iso_file_path
					.materialized_path_for_children()
					.expect("sub path for shallow identifier must be a directory"),
			)),
			file_path::size_in_bytes_bytes::not(Some(0u64.to_be_bytes().to_vec())),
		],
		[file_path_id.map(file_path::id::gte)],
	)
}

@@ -0,0 +1,280 @@
use crate::{
	file_identifier::{FileMetadata, NonCriticalFileIdentifierError},
	Error, NonCriticalJobError,
};

use sd_core_file_path_helper::IsolatedFilePathData;
use sd_core_prisma_helpers::file_path_for_file_identifier;

use sd_prisma::prisma::location;
use sd_task_system::{
	ExecStatus, Interrupter, InterruptionKind, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
use sd_utils::error::FileIOError;

use std::{
	collections::HashMap, future::IntoFuture, mem, path::PathBuf, pin::pin, sync::Arc,
	time::Duration,
};

use futures::stream::{self, FuturesUnordered, StreamExt};
use futures_concurrency::stream::Merge;
use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::error;
use uuid::Uuid;

use super::IdentifiedFile;

#[derive(Debug, Serialize, Deserialize)]
pub struct ExtractFileMetadataTask {
	id: TaskId,
	location: Arc<location::Data>,
	location_path: Arc<PathBuf>,
	file_paths_by_id: HashMap<Uuid, file_path_for_file_identifier::Data>,
	identified_files: HashMap<Uuid, IdentifiedFile>,
	extract_metadata_time: Duration,
	errors: Vec<NonCriticalJobError>,
	is_shallow: bool,
}

#[derive(Debug)]
pub struct ExtractFileMetadataTaskOutput {
	pub identified_files: HashMap<Uuid, IdentifiedFile>,
	pub extract_metadata_time: Duration,
	pub errors: Vec<NonCriticalJobError>,
}

impl ExtractFileMetadataTask {
	fn new(
		location: Arc<location::Data>,
		location_path: Arc<PathBuf>,
		file_paths: Vec<file_path_for_file_identifier::Data>,
		is_shallow: bool,
	) -> Self {
		Self {
			id: TaskId::new_v4(),
			location,
			location_path,
			identified_files: HashMap::with_capacity(file_paths.len()),
			file_paths_by_id: file_paths
				.into_iter()
				.map(|file_path| {
					// SAFETY: This should never happen
					(
						Uuid::from_slice(&file_path.pub_id).expect("file_path.pub_id is invalid!"),
						file_path,
					)
				})
				.collect(),
			extract_metadata_time: Duration::ZERO,
			errors: Vec::new(),
			is_shallow,
		}
	}

	#[must_use]
	pub fn new_deep(
		location: Arc<location::Data>,
		location_path: Arc<PathBuf>,
		file_paths: Vec<file_path_for_file_identifier::Data>,
	) -> Self {
		Self::new(location, location_path, file_paths, false)
	}

	#[must_use]
	pub fn new_shallow(
		location: Arc<location::Data>,
		location_path: Arc<PathBuf>,
		file_paths: Vec<file_path_for_file_identifier::Data>,
	) -> Self {
		Self::new(location, location_path, file_paths, true)
	}
}

#[async_trait::async_trait]
impl Task<Error> for ExtractFileMetadataTask {
	fn id(&self) -> TaskId {
		self.id
	}
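
	// Shallow runs come from the interactive (non-job) path, so the task system
	// gives them priority over deep background scans.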
	fn with_priority(&self) -> bool {
		self.is_shallow
	}

	async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, Error> {
		enum StreamMessage {
			Processed(Uuid, Result<FileMetadata, FileIOError>),
			Interrupt(InterruptionKind),
		}

		let Self {
			location,
			location_path,
			file_paths_by_id,
			identified_files,
			extract_metadata_time,
			errors,
			..
		} = self;

		let start_time = Instant::now();

		if !file_paths_by_id.is_empty() {
			let extraction_futures = file_paths_by_id
				.iter()
				.filter_map(|(file_path_id, file_path)| {
					try_iso_file_path_extraction(
						location.id,
						*file_path_id,
						file_path,
						Arc::clone(location_path),
						errors,
					)
				})
				.map(|(file_path_id, iso_file_path, location_path)| async move {
					StreamMessage::Processed(
						file_path_id,
						FileMetadata::new(&*location_path, &iso_file_path).await,
					)
				})
				.collect::<FuturesUnordered<_>>();

			let mut msg_stream = pin!((
				extraction_futures,
				stream::once(interrupter.into_future()).map(StreamMessage::Interrupt)
			)
				.merge());
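
			// The extraction futures are merged with a single interrupter future, so a
			// pause or cancel request is observed between file results without polling.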
|
||||
|
||||
while let Some(msg) = msg_stream.next().await {
|
||||
match msg {
|
||||
StreamMessage::Processed(file_path_pub_id, res) => {
|
||||
let file_path = file_paths_by_id
|
||||
.remove(&file_path_pub_id)
|
||||
.expect("file_path must be here");
|
||||
|
||||
match res {
|
||||
Ok(FileMetadata { cas_id, kind, .. }) => {
|
||||
identified_files.insert(
|
||||
file_path_pub_id,
|
||||
IdentifiedFile {
|
||||
file_path,
|
||||
cas_id,
|
||||
kind,
|
||||
},
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
handle_non_critical_errors(
|
||||
location.id,
|
||||
file_path_pub_id,
|
||||
&e,
|
||||
errors,
|
||||
);
|
||||
}
|
||||
}
|
||||

                        if file_paths_by_id.is_empty() {
                            // All files have been processed, so we can end this merged stream
                            // instead of keeping it alive waiting for an interrupt signal
                            break;
                        }
                    }

                    StreamMessage::Interrupt(kind) => {
                        *extract_metadata_time += start_time.elapsed();
                        return Ok(match kind {
                            InterruptionKind::Pause => ExecStatus::Paused,
                            InterruptionKind::Cancel => ExecStatus::Canceled,
                        });
                    }
                }
            }
        }

        Ok(ExecStatus::Done(
            ExtractFileMetadataTaskOutput {
                identified_files: mem::take(identified_files),
                extract_metadata_time: *extract_metadata_time + start_time.elapsed(),
                errors: mem::take(errors),
            }
            .into_output(),
        ))
    }
}

fn handle_non_critical_errors(
    location_id: location::id::Type,
    file_path_pub_id: Uuid,
    e: &FileIOError,
    errors: &mut Vec<NonCriticalJobError>,
) {
    error!("Failed to extract file metadata <location_id={location_id}, file_path_pub_id='{file_path_pub_id}'>: {e:#?}");

    let formatted_error = format!("<file_path_pub_id='{file_path_pub_id}', error={e}>");

    #[cfg(target_os = "windows")]
    {
        // Handle case where file is on-demand (NTFS only)
        if e.source.raw_os_error().map_or(false, |code| code == 362) {
            errors.push(
                NonCriticalFileIdentifierError::FailedToExtractMetadataFromOnDemandFile(
                    formatted_error,
                )
                .into(),
            );
        } else {
            errors.push(
                NonCriticalFileIdentifierError::FailedToExtractFileMetadata(formatted_error).into(),
            );
        }
    }

    #[cfg(not(target_os = "windows"))]
    {
        errors.push(
            NonCriticalFileIdentifierError::FailedToExtractFileMetadata(formatted_error).into(),
        );
    }
}

fn try_iso_file_path_extraction(
    location_id: location::id::Type,
    file_path_pub_id: Uuid,
    file_path: &file_path_for_file_identifier::Data,
    location_path: Arc<PathBuf>,
    errors: &mut Vec<NonCriticalJobError>,
) -> Option<(Uuid, IsolatedFilePathData<'static>, Arc<PathBuf>)> {
    IsolatedFilePathData::try_from((location_id, file_path))
        .map(IsolatedFilePathData::to_owned)
        .map(|iso_file_path| (file_path_pub_id, iso_file_path, location_path))
        .map_err(|e| {
            error!("Failed to extract isolated file path data: {e:#?}");
            errors.push(
                NonCriticalFileIdentifierError::FailedToExtractIsolatedFilePathData(format!(
                    "<file_path_pub_id='{file_path_pub_id}', error={e}>"
                ))
                .into(),
            );
        })
        .ok()
}

impl SerializableTask<Error> for ExtractFileMetadataTask {
    type SerializeError = rmp_serde::encode::Error;

    type DeserializeError = rmp_serde::decode::Error;

    type DeserializeCtx = ();

    async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
        rmp_serde::to_vec_named(&self)
    }

    async fn deserialize(
        data: &[u8],
        (): Self::DeserializeCtx,
    ) -> Result<Self, Self::DeserializeError> {
        rmp_serde::from_slice(data)
    }
}
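
The deep/shallow split above is the whole priority story for this task: `is_shallow` is fixed at construction time through the only two public constructors, and `with_priority` simply returns it. A minimal sketch of a call site, assuming `location`, `location_path`, and `file_paths` are in scope and that a `user_initiated` flag exists (the flag is hypothetical, not part of this diff):

// Hypothetical call site: shallow tasks come from direct user interaction and
// report with_priority() == true, so the task system schedules them first.
let task = if user_initiated {
    ExtractFileMetadataTask::new_shallow(
        Arc::clone(&location),
        Arc::clone(&location_path),
        file_paths,
    )
} else {
    ExtractFileMetadataTask::new_deep(
        Arc::clone(&location),
        Arc::clone(&location_path),
        file_paths,
    )
};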

core/crates/heavy-lifting/src/file_identifier/tasks/mod.rs (new file)
@@ -0,0 +1,18 @@
use sd_core_prisma_helpers::file_path_for_file_identifier;

use sd_file_ext::kind::ObjectKind;

use serde::{Deserialize, Serialize};

mod extract_file_metadata;
mod object_processor;

pub use extract_file_metadata::{ExtractFileMetadataTask, ExtractFileMetadataTaskOutput};
pub use object_processor::{ObjectProcessorTask, ObjectProcessorTaskMetrics};

#[derive(Debug, Serialize, Deserialize)]
pub(super) struct IdentifiedFile {
    pub(super) file_path: file_path_for_file_identifier::Data,
    pub(super) cas_id: Option<String>,
    pub(super) kind: ObjectKind,
}

core/crates/heavy-lifting/src/file_identifier/tasks/object_processor.rs (new file)
@@ -0,0 +1,473 @@
use crate::{file_identifier::FileIdentifierError, Error};

use sd_core_prisma_helpers::{
    file_path_for_file_identifier, file_path_pub_id, object_for_file_identifier,
};
use sd_core_sync::Manager as SyncManager;

use sd_prisma::{
    prisma::{file_path, object, PrismaClient},
    prisma_sync,
};
use sd_sync::{CRDTOperation, OperationFactory};
use sd_task_system::{
    check_interruption, ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task, TaskId,
};
use sd_utils::{msgpack, uuid_to_bytes};

use std::{
    collections::{HashMap, HashSet},
    mem,
    sync::Arc,
    time::Duration,
};

use prisma_client_rust::Select;
use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::{debug, trace};
use uuid::Uuid;

use super::IdentifiedFile;

#[derive(Debug)]
pub struct ObjectProcessorTask {
    id: TaskId,
    db: Arc<PrismaClient>,
    sync: Arc<SyncManager>,
    identified_files: HashMap<Uuid, IdentifiedFile>,
    metrics: ObjectProcessorTaskMetrics,
    stage: Stage,
    is_shallow: bool,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct SaveState {
    id: TaskId,
    identified_files: HashMap<Uuid, IdentifiedFile>,
    metrics: ObjectProcessorTaskMetrics,
    stage: Stage,
    is_shallow: bool,
}

#[derive(Debug, Serialize, Deserialize, Default)]
pub struct ObjectProcessorTaskMetrics {
    pub assign_cas_ids_time: Duration,
    pub fetch_existing_objects_time: Duration,
    pub assign_to_existing_object_time: Duration,
    pub create_object_time: Duration,
    pub created_objects_count: u64,
    pub linked_objects_count: u64,
}

#[derive(Debug, Serialize, Deserialize)]
enum Stage {
    Starting,
    FetchExistingObjects,
    AssignFilePathsToExistingObjects {
        existing_objects_by_cas_id: HashMap<String, object_for_file_identifier::Data>,
    },
    CreateObjects,
}

impl ObjectProcessorTask {
    fn new(
        identified_files: HashMap<Uuid, IdentifiedFile>,
        db: Arc<PrismaClient>,
        sync: Arc<SyncManager>,
        is_shallow: bool,
    ) -> Self {
        Self {
            id: TaskId::new_v4(),
            db,
            sync,
            identified_files,
            stage: Stage::Starting,
            metrics: ObjectProcessorTaskMetrics::default(),
            is_shallow,
        }
    }

    pub fn new_deep(
        identified_files: HashMap<Uuid, IdentifiedFile>,
        db: Arc<PrismaClient>,
        sync: Arc<SyncManager>,
    ) -> Self {
        Self::new(identified_files, db, sync, false)
    }

    pub fn new_shallow(
        identified_files: HashMap<Uuid, IdentifiedFile>,
        db: Arc<PrismaClient>,
        sync: Arc<SyncManager>,
    ) -> Self {
        Self::new(identified_files, db, sync, true)
    }
}

#[async_trait::async_trait]
impl Task<Error> for ObjectProcessorTask {
    fn id(&self) -> TaskId {
        self.id
    }

    fn with_priority(&self) -> bool {
        self.is_shallow
    }

    async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, Error> {
        let Self {
            db,
            sync,
            identified_files,
            stage,
            metrics:
                ObjectProcessorTaskMetrics {
                    assign_cas_ids_time,
                    fetch_existing_objects_time,
                    assign_to_existing_object_time,
                    create_object_time,
                    created_objects_count,
                    linked_objects_count,
                },
            ..
        } = self;

        loop {
            match stage {
                Stage::Starting => {
                    let start = Instant::now();
                    assign_cas_id_to_file_paths(identified_files, db, sync).await?;
                    *assign_cas_ids_time = start.elapsed();
                    *stage = Stage::FetchExistingObjects;
                }

                Stage::FetchExistingObjects => {
                    let start = Instant::now();
                    let existing_objects_by_cas_id =
                        fetch_existing_objects_by_cas_id(identified_files, db).await?;
                    *fetch_existing_objects_time = start.elapsed();
                    *stage = Stage::AssignFilePathsToExistingObjects {
                        existing_objects_by_cas_id,
                    };
                }

                Stage::AssignFilePathsToExistingObjects {
                    existing_objects_by_cas_id,
                } => {
                    let start = Instant::now();
                    let assigned_file_path_pub_ids = assign_existing_objects_to_file_paths(
                        identified_files,
                        existing_objects_by_cas_id,
                        db,
                        sync,
                    )
                    .await?;
                    *assign_to_existing_object_time = start.elapsed();
                    *linked_objects_count = assigned_file_path_pub_ids.len() as u64;

                    debug!(
                        "Found {} existing Objects, linked file paths to them",
                        existing_objects_by_cas_id.len()
                    );

                    for file_path_pub_id::Data { pub_id } in assigned_file_path_pub_ids {
                        let pub_id = Uuid::from_slice(&pub_id).expect("uuid bytes are invalid");
                        trace!("Assigned file path <file_path_pub_id={pub_id}> to existing object");

                        identified_files
                            .remove(&pub_id)
                            .expect("file_path must be here");
                    }

                    *stage = Stage::CreateObjects;

                    if identified_files.is_empty() {
                        // No objects to be created, we're good to finish already
                        break;
                    }
                }

                Stage::CreateObjects => {
                    let start = Instant::now();
                    *created_objects_count = create_objects(identified_files, db, sync).await?;
                    *create_object_time = start.elapsed();

                    break;
                }
            }

            check_interruption!(interrupter);
        }

        Ok(ExecStatus::Done(mem::take(&mut self.metrics).into_output()))
    }
}

async fn assign_cas_id_to_file_paths(
    identified_files: &HashMap<Uuid, IdentifiedFile>,
    db: &PrismaClient,
    sync: &SyncManager,
) -> Result<(), FileIdentifierError> {
    // Assign cas_id to each file path
    sync.write_ops(
        db,
        identified_files
            .iter()
            .map(|(pub_id, IdentifiedFile { cas_id, .. })| {
                (
                    sync.shared_update(
                        prisma_sync::file_path::SyncId {
                            pub_id: uuid_to_bytes(*pub_id),
                        },
                        file_path::cas_id::NAME,
                        msgpack!(cas_id),
                    ),
                    db.file_path()
                        .update(
                            file_path::pub_id::equals(uuid_to_bytes(*pub_id)),
                            vec![file_path::cas_id::set(cas_id.clone())],
                        )
                        // We don't need any data here, just the id avoids receiving the entire
                        // object, as we can't pass an empty select macro call
                        .select(file_path::select!({ id })),
                )
            })
            .unzip::<_, _, _, Vec<_>>(),
    )
    .await?;

    Ok(())
}

async fn fetch_existing_objects_by_cas_id(
    identified_files: &HashMap<Uuid, IdentifiedFile>,
    db: &PrismaClient,
) -> Result<HashMap<String, object_for_file_identifier::Data>, FileIdentifierError> {
    // Retrieve objects that are already connected to file paths with the same cas_id
    db.object()
        .find_many(vec![object::file_paths::some(vec![
            file_path::cas_id::in_vec(
                identified_files
                    .values()
                    .filter_map(|IdentifiedFile { cas_id, .. }| cas_id.as_ref())
                    .cloned()
                    .collect::<HashSet<_>>()
                    .into_iter()
                    .collect(),
            ),
        ])])
        .select(object_for_file_identifier::select())
        .exec()
        .await
        .map_err(Into::into)
        .map(|objects| {
            objects
                .into_iter()
                .filter_map(|object| {
                    object
                        .file_paths
                        .first()
                        .and_then(|file_path| file_path.cas_id.clone())
                        .map(|cas_id| (cas_id, object))
                })
                .collect()
        })
}

async fn assign_existing_objects_to_file_paths(
    identified_files: &HashMap<Uuid, IdentifiedFile>,
    objects_by_cas_id: &HashMap<String, object_for_file_identifier::Data>,
    db: &PrismaClient,
    sync: &SyncManager,
) -> Result<Vec<file_path_pub_id::Data>, FileIdentifierError> {
    // Attempt to associate each file path with an object that has been
    // connected to file paths with the same cas_id
    sync.write_ops(
        db,
        identified_files
            .iter()
            .filter_map(|(pub_id, IdentifiedFile { cas_id, .. })| {
                objects_by_cas_id
                    // Filtering out files without cas_id due to being empty
                    .get(cas_id.as_ref()?)
                    .map(|object| (*pub_id, object))
            })
            .map(|(pub_id, object)| {
                connect_file_path_to_object(
                    pub_id,
                    // SAFETY: This pub_id is generated by the uuid lib, but we have to store bytes in sqlite
                    Uuid::from_slice(&object.pub_id).expect("uuid bytes are invalid"),
                    sync,
                    db,
                )
            })
            .unzip::<_, _, Vec<_>, Vec<_>>(),
    )
    .await
    .map_err(Into::into)
}

fn connect_file_path_to_object<'db>(
    file_path_pub_id: Uuid,
    object_pub_id: Uuid,
    sync: &SyncManager,
    db: &'db PrismaClient,
) -> (CRDTOperation, Select<'db, file_path_pub_id::Data>) {
    trace!("Connecting <file_path_pub_id={file_path_pub_id}> to <object_pub_id={object_pub_id}>");

    let vec_id = object_pub_id.as_bytes().to_vec();

    (
        sync.shared_update(
            prisma_sync::file_path::SyncId {
                pub_id: uuid_to_bytes(file_path_pub_id),
            },
            file_path::object::NAME,
            msgpack!(prisma_sync::object::SyncId {
                pub_id: vec_id.clone()
            }),
        ),
        db.file_path()
            .update(
                file_path::pub_id::equals(uuid_to_bytes(file_path_pub_id)),
                vec![file_path::object::connect(object::pub_id::equals(vec_id))],
            )
            .select(file_path_pub_id::select()),
    )
}

async fn create_objects(
    identified_files: &HashMap<Uuid, IdentifiedFile>,
    db: &PrismaClient,
    sync: &SyncManager,
) -> Result<u64, FileIdentifierError> {
    trace!("Creating {} new Objects", identified_files.len());

    let (object_create_args, file_path_update_args) = identified_files
        .iter()
        .map(
            |(
                file_path_pub_id,
                IdentifiedFile {
                    file_path: file_path_for_file_identifier::Data { date_created, .. },
                    kind,
                    ..
                },
            )| {
                let object_pub_id = Uuid::new_v4();

                let kind = *kind as i32;

                let (sync_params, db_params) = [
                    (
                        (object::date_created::NAME, msgpack!(date_created)),
                        object::date_created::set(*date_created),
                    ),
                    (
                        (object::kind::NAME, msgpack!(kind)),
                        object::kind::set(Some(kind)),
                    ),
                ]
                .into_iter()
                .unzip::<_, _, Vec<_>, Vec<_>>();

                (
                    (
                        sync.shared_create(
                            prisma_sync::object::SyncId {
                                pub_id: uuid_to_bytes(object_pub_id),
                            },
                            sync_params,
                        ),
                        object::create_unchecked(uuid_to_bytes(object_pub_id), db_params),
                    ),
                    connect_file_path_to_object(*file_path_pub_id, object_pub_id, sync, db),
                )
            },
        )
        .unzip::<_, _, Vec<_>, Vec<_>>();

    // create new object records with assembled values
    let total_created_files = sync
        .write_ops(db, {
            let (sync, db_params) = object_create_args
                .into_iter()
                .unzip::<_, _, Vec<_>, Vec<_>>();

            (
                sync.into_iter().flatten().collect(),
                db.object().create_many(db_params),
            )
        })
        .await?;

    trace!("Created {total_created_files} new Objects");

    if total_created_files > 0 {
        trace!("Updating file paths with created objects");

        sync.write_ops(
            db,
            file_path_update_args
                .into_iter()
                .unzip::<_, _, Vec<_>, Vec<_>>(),
        )
        .await?;

        trace!("Updated file paths with created objects");
    }

    #[allow(clippy::cast_sign_loss)] // SAFETY: We're sure the value is positive
    Ok(total_created_files as u64)
}

impl SerializableTask<Error> for ObjectProcessorTask {
    type SerializeError = rmp_serde::encode::Error;

    type DeserializeError = rmp_serde::decode::Error;

    type DeserializeCtx = (Arc<PrismaClient>, Arc<SyncManager>);

    async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
        let Self {
            id,
            identified_files,
            metrics,
            stage,
            is_shallow,
            ..
        } = self;

        rmp_serde::to_vec_named(&SaveState {
            id,
            identified_files,
            metrics,
            stage,
            is_shallow,
        })
    }

    async fn deserialize(
        data: &[u8],
        (db, sync): Self::DeserializeCtx,
    ) -> Result<Self, Self::DeserializeError> {
        rmp_serde::from_slice(data).map(
            |SaveState {
                 id,
                 identified_files,
                 metrics,
                 stage,
                 is_shallow,
             }| Self {
                id,
                db,
                sync,
                identified_files,
                metrics,
                stage,
                is_shallow,
            },
        )
    }
}
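
Since `SaveState` mirrors every field of `ObjectProcessorTask` except the `db` and `sync` handles, a paused task survives a msgpack roundtrip and picks up fresh handles on deserialization. A rough sketch of that roundtrip, assuming `task`, `db`, and `sync` are already in scope (a fragment, not a standalone program):

// Serialize a paused task, then rebuild it with fresh db/sync handles; the
// TaskId, stage, metrics, and identified_files all survive the roundtrip.
let original_id = task.id();
let bytes = task.serialize().await.expect("task should serialize");
let resumed = ObjectProcessorTask::deserialize(&bytes, (Arc::clone(&db), Arc::clone(&sync)))
    .await
    .expect("task should deserialize");
assert_eq!(resumed.id(), original_id);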

@@ -8,13 +8,15 @@ use crate::{
 		utils::cancel_pending_tasks,
 		SerializableJob, SerializedTasks,
 	},
-	Error, NonCriticalJobError,
+	utils::sub_path::get_full_path_from_sub_path,
+	Error, LocationScanState, NonCriticalJobError,
 };

 use sd_core_file_path_helper::IsolatedFilePathData;
 use sd_core_indexer_rules::{IndexerRule, IndexerRuler};
 use sd_core_prisma_helpers::location_with_indexer_rules;

 use sd_prisma::prisma::location;
 use sd_task_system::{
 	AnyTaskOutput, IntoTask, SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
 	TaskOutput, TaskStatus,
@@ -39,7 +41,7 @@ use tokio::time::Instant;
 use tracing::warn;

 use super::{
-	determine_initial_walk_path, remove_non_existing_file_paths, reverse_update_directories_sizes,
+	remove_non_existing_file_paths, reverse_update_directories_sizes,
 	tasks::{
 		saver::{SaveTask, SaveTaskOutput},
 		updater::{UpdateTask, UpdateTaskOutput},
@@ -70,6 +72,64 @@ pub struct IndexerJob {
 impl Job for IndexerJob {
 	const NAME: JobName = JobName::Indexer;

+	async fn resume_tasks(
+		&mut self,
+		dispatcher: &JobTaskDispatcher,
+		ctx: &impl JobContext,
+		SerializedTasks(serialized_tasks): SerializedTasks,
+	) -> Result<(), Error> {
+		let location_id = self.location.id;
+
+		self.pending_tasks_on_resume = dispatcher
+			.dispatch_many_boxed(
+				rmp_serde::from_slice::<Vec<(TaskKind, Vec<u8>)>>(&serialized_tasks)
+					.map_err(IndexerError::from)?
+					.into_iter()
+					.map(|(task_kind, task_bytes)| {
+						let indexer_ruler = self.indexer_ruler.clone();
+						let iso_file_path_factory = self.iso_file_path_factory.clone();
+						async move {
+							match task_kind {
+								TaskKind::Walk => WalkDirTask::deserialize(
+									&task_bytes,
+									(
+										indexer_ruler.clone(),
+										WalkerDBProxy {
+											location_id,
+											db: Arc::clone(ctx.db()),
+										},
+										iso_file_path_factory.clone(),
+										dispatcher.clone(),
+									),
+								)
+								.await
+								.map(IntoTask::into_task),
+
+								TaskKind::Save => SaveTask::deserialize(
+									&task_bytes,
+									(Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+								)
+								.await
+								.map(IntoTask::into_task),
+								TaskKind::Update => UpdateTask::deserialize(
+									&task_bytes,
+									(Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
+								)
+								.await
+								.map(IntoTask::into_task),
+							}
+						}
+					})
+					.collect::<Vec<_>>()
+					.try_join()
+					.await
+					.map_err(IndexerError::from)?,
+			)
+			.await;
+
+		Ok(())
+	}
+
 	async fn run(
 		mut self,
 		dispatcher: JobTaskDispatcher,
@@ -102,7 +162,7 @@ impl Job for IndexerJob {
 					self.metadata.total_paths += chunked_saves.len() as u64;
 					self.metadata.total_save_steps += 1;

-					SaveTask::new(
+					SaveTask::new_deep(
 						self.location.id,
 						self.location.pub_id.clone(),
 						chunked_saves,
@@ -162,6 +222,10 @@ impl Job for IndexerJob {
 			metadata.db_write_time += start_size_update_time.elapsed();
 		}

+		if metadata.removed_count > 0 {
+			// TODO: Dispatch a task to remove orphan objects
+		}
+
 		if metadata.indexed_count > 0 || metadata.removed_count > 0 {
 			ctx.invalidate_query("search.paths");
 		}
@@ -171,6 +235,16 @@ impl Job for IndexerJob {
 			"all tasks must be completed here"
 		);

+		ctx.db()
+			.location()
+			.update(
+				location::id::equals(location.id),
+				vec![location::scan_state::set(LocationScanState::Indexed as i32)],
+			)
+			.exec()
+			.await
+			.map_err(IndexerError::from)?;
+
 		Ok(ReturnStatus::Completed(
 			JobReturn::builder()
 				.with_metadata(metadata)
@@ -178,64 +252,6 @@ impl Job for IndexerJob {
 			.build(),
 		))
 	}
-
-	async fn resume_tasks(
-		&mut self,
-		dispatcher: &JobTaskDispatcher,
-		ctx: &impl JobContext,
-		SerializedTasks(serialized_tasks): SerializedTasks,
-	) -> Result<(), Error> {
-		let location_id = self.location.id;
-
-		self.pending_tasks_on_resume = dispatcher
-			.dispatch_many_boxed(
-				rmp_serde::from_slice::<Vec<(TaskKind, Vec<u8>)>>(&serialized_tasks)
-					.map_err(IndexerError::from)?
-					.into_iter()
-					.map(|(task_kind, task_bytes)| {
-						let indexer_ruler = self.indexer_ruler.clone();
-						let iso_file_path_factory = self.iso_file_path_factory.clone();
-						async move {
-							match task_kind {
-								TaskKind::Walk => WalkDirTask::deserialize(
-									&task_bytes,
-									(
-										indexer_ruler.clone(),
-										WalkerDBProxy {
-											location_id,
-											db: Arc::clone(ctx.db()),
-										},
-										iso_file_path_factory.clone(),
-										dispatcher.clone(),
-									),
-								)
-								.await
-								.map(IntoTask::into_task),
-
-								TaskKind::Save => SaveTask::deserialize(
-									&task_bytes,
-									(Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
-								)
-								.await
-								.map(IntoTask::into_task),
-								TaskKind::Update => UpdateTask::deserialize(
-									&task_bytes,
-									(Arc::clone(ctx.db()), Arc::clone(ctx.sync())),
-								)
-								.await
-								.map(IntoTask::into_task),
-							}
-						}
-					})
-					.collect::<Vec<_>>()
-					.try_join()
-					.await
-					.map_err(IndexerError::from)?,
-			)
-			.await;
-
-		Ok(())
-	}
 }

 impl IndexerJob {
@@ -282,6 +298,12 @@ impl IndexerJob {
 		job_ctx: &impl JobContext,
 		dispatcher: &JobTaskDispatcher,
 	) -> Result<Vec<TaskHandle<Error>>, IndexerError> {
+		self.metadata.completed_tasks += 1;
+
+		job_ctx.progress(vec![ProgressUpdate::CompletedTaskCount(
+			self.metadata.completed_tasks,
+		)]);
+
 		if any_task_output.is::<WalkTaskOutput>() {
 			return self
 				.process_walk_output(
@@ -310,12 +332,6 @@ impl IndexerJob {
 			unreachable!("Unexpected task output type: <id='{task_id}'>");
 		}

-		self.metadata.completed_tasks += 1;
-
-		job_ctx.progress(vec![ProgressUpdate::CompletedTaskCount(
-			self.metadata.completed_tasks,
-		)]);
-
 		Ok(Vec::new())
 	}
@@ -394,7 +410,7 @@ impl IndexerJob {
 					self.metadata.total_paths += chunked_saves.len() as u64;
 					self.metadata.total_save_steps += 1;

-					SaveTask::new(
+					SaveTask::new_deep(
 						self.location.id,
 						self.location.pub_id.clone(),
 						chunked_saves,
@@ -413,7 +429,7 @@ impl IndexerJob {
 					self.metadata.total_updated_paths += chunked_updates.len() as u64;
 					self.metadata.total_update_steps += 1;

-					UpdateTask::new(
+					UpdateTask::new_deep(
 						chunked_updates,
 						Arc::clone(job_ctx.db()),
 						Arc::clone(job_ctx.sync()),
@@ -528,7 +544,7 @@ impl IndexerJob {
 		// if we don't have any pending task, then this is a fresh job
 		if self.pending_tasks_on_resume.is_empty() {
 			let walker_root_path = Arc::new(
-				determine_initial_walk_path(
+				get_full_path_from_sub_path(
 					self.location.id,
 					&self.sub_path,
 					&*self.iso_file_path_factory.location_path,
@@ -539,7 +555,7 @@ impl IndexerJob {

 			pending_running_tasks.push(
 				dispatcher
-					.dispatch(WalkDirTask::new(
+					.dispatch(WalkDirTask::new_deep(
 						walker_root_path.as_ref(),
 						Arc::clone(&walker_root_path),
 						self.indexer_ruler.clone(),
@@ -548,7 +564,7 @@ impl IndexerJob {
 						location_id: self.location.id,
 						db: Arc::clone(job_ctx.db()),
 					},
-					Some(dispatcher.clone()),
+					dispatcher.clone(),
 				)?)
 				.await,
 			);

@@ -1,9 +1,6 @@
-use crate::NonCriticalJobError;
+use crate::{utils::sub_path::SubPathError, NonCriticalJobError};

-use sd_core_file_path_helper::{
-	ensure_file_path_exists, ensure_sub_path_is_directory, ensure_sub_path_is_in_location,
-	FilePathError, IsolatedFilePathData,
-};
+use sd_core_file_path_helper::{FilePathError, IsolatedFilePathData};
 use sd_core_indexer_rules::IndexerRuleError;
 use sd_core_prisma_helpers::{
 	file_path_pub_and_cas_ids, file_path_to_isolate_with_pub_id, file_path_walker,
@@ -53,8 +50,8 @@ pub enum IndexerError {
 	// Not Found errors
 	#[error("indexer rule not found: <id='{0}'>")]
 	IndexerRuleNotFound(i32),
-	#[error("received sub path not in database: <path='{}'>", .0.display())]
-	SubPathNotFound(Box<Path>),
+	#[error(transparent)]
+	SubPath(#[from] SubPathError),

 	// Internal Errors
 	#[error("database Error: {0}")]
@@ -78,10 +75,12 @@ pub enum IndexerError {
 impl From<IndexerError> for rspc::Error {
 	fn from(err: IndexerError) -> Self {
 		match err {
-			IndexerError::IndexerRuleNotFound(_) | IndexerError::SubPathNotFound(_) => {
+			IndexerError::IndexerRuleNotFound(_) => {
 				Self::with_cause(ErrorCode::NotFound, err.to_string(), err)
 			}

+			IndexerError::SubPath(sub_path_err) => sub_path_err.into(),
+
 			IndexerError::Rules(rule_err) => rule_err.into(),

 			_ => Self::with_cause(ErrorCode::InternalServerError, err.to_string(), err),
@@ -111,36 +110,6 @@ pub enum NonCriticalIndexerError {
 	MissingFilePathData(String),
 }

-async fn determine_initial_walk_path(
-	location_id: location::id::Type,
-	sub_path: &Option<impl AsRef<Path> + Send + Sync>,
-	location_path: impl AsRef<Path> + Send,
-	db: &PrismaClient,
-) -> Result<PathBuf, IndexerError> {
-	let location_path = location_path.as_ref();
-
-	match sub_path {
-		Some(sub_path) if sub_path.as_ref() != Path::new("") => {
-			let sub_path = sub_path.as_ref();
-			let full_path = ensure_sub_path_is_in_location(location_path, sub_path).await?;
-
-			ensure_sub_path_is_directory(location_path, sub_path).await?;
-
-			ensure_file_path_exists(
-				sub_path,
-				&IsolatedFilePathData::new(location_id, location_path, &full_path, true)
-					.map_err(IndexerError::from)?,
-				db,
-				IndexerError::SubPathNotFound,
-			)
-			.await?;
-
-			Ok(full_path)
-		}
-		_ => Ok(location_path.to_path_buf()),
-	}
-}
-
 fn chunk_db_queries<'db, 'iso>(
 	iso_file_paths: impl IntoIterator<Item = &'iso IsolatedFilePathData<'iso>>,
 	db: &'db PrismaClient,

@@ -1,4 +1,4 @@
-use crate::{Error, NonCriticalJobError};
+use crate::{utils::sub_path::get_full_path_from_sub_path, Error, NonCriticalJobError};

 use sd_core_indexer_rules::{IndexerRule, IndexerRuler};
 use sd_core_prisma_helpers::location_with_indexer_rules;
@@ -19,7 +19,7 @@ use itertools::Itertools;
 use tracing::{debug, warn};

 use super::{
-	determine_initial_walk_path, remove_non_existing_file_paths, reverse_update_directories_sizes,
+	remove_non_existing_file_paths, reverse_update_directories_sizes,
 	tasks::{
 		saver::{SaveTask, SaveTaskOutput},
 		updater::{UpdateTask, UpdateTaskOutput},
@@ -45,7 +45,9 @@ pub async fn shallow(
 		.map_err(IndexerError::from)?;

 	let to_walk_path = Arc::new(
-		determine_initial_walk_path(location.id, &Some(sub_path), &*location_path, &db).await?,
+		get_full_path_from_sub_path(location.id, &Some(sub_path), &*location_path, &db)
+			.await
+			.map_err(IndexerError::from)?,
 	);

 	let Some(WalkTaskOutput {
@@ -124,7 +126,7 @@ async fn walk(
 	dispatcher: &BaseTaskDispatcher<Error>,
 ) -> Result<Option<WalkTaskOutput>, Error> {
 	match dispatcher
-		.dispatch(WalkDirTask::new(
+		.dispatch(WalkDirTask::new_shallow(
 			ToWalkEntry::from(&*to_walk_path),
 			to_walk_path,
 			location
@@ -142,7 +144,6 @@ async fn walk(
 			location_id: location.id,
 			db,
 		},
-		None::<BaseTaskDispatcher<Error>>,
 	)?)
-	.await
+	.await?
@@ -186,7 +187,7 @@ async fn save_and_update(
 		.chunks(BATCH_SIZE)
 		.into_iter()
 		.map(|chunk| {
-			SaveTask::new(
+			SaveTask::new_shallow(
 				location.id,
 				location.pub_id.clone(),
 				chunk.collect::<Vec<_>>(),
@@ -201,7 +202,7 @@ async fn save_and_update(
 		.chunks(BATCH_SIZE)
 		.into_iter()
 		.map(|chunk| {
-			UpdateTask::new(
+			UpdateTask::new_shallow(
 				chunk.collect::<Vec<_>>(),
 				Arc::clone(&db),
 				Arc::clone(&sync),

@@ -28,11 +28,12 @@ pub struct SaveTask {
 	walked_entries: Vec<WalkedEntry>,
 	db: Arc<PrismaClient>,
 	sync: Arc<SyncManager>,
+	is_shallow: bool,
 }

 impl SaveTask {
 	#[must_use]
-	pub fn new(
+	pub fn new_deep(
 		location_id: location::id::Type,
 		location_pub_id: location::pub_id::Type,
 		walked_entries: Vec<WalkedEntry>,
@@ -46,6 +47,26 @@ impl SaveTask {
 			walked_entries,
 			db,
 			sync,
+			is_shallow: false,
 		}
 	}
+
+	#[must_use]
+	pub fn new_shallow(
+		location_id: location::id::Type,
+		location_pub_id: location::pub_id::Type,
+		walked_entries: Vec<WalkedEntry>,
+		db: Arc<PrismaClient>,
+		sync: Arc<SyncManager>,
+	) -> Self {
+		Self {
+			id: TaskId::new_v4(),
+			location_id,
+			location_pub_id,
+			walked_entries,
+			db,
+			sync,
+			is_shallow: true,
+		}
+	}
 }
@@ -56,6 +77,7 @@ struct SaveTaskSaveState {
 	location_id: location::id::Type,
 	location_pub_id: location::pub_id::Type,
 	walked_entries: Vec<WalkedEntry>,
+	is_shallow: bool,
 }

 impl SerializableTask<Error> for SaveTask {
@@ -71,6 +93,7 @@ impl SerializableTask<Error> for SaveTask {
 			location_id,
 			location_pub_id,
 			walked_entries,
+			is_shallow,
 			..
 		} = self;
 		rmp_serde::to_vec_named(&SaveTaskSaveState {
@@ -78,6 +101,7 @@ impl SerializableTask<Error> for SaveTask {
 			location_id,
 			location_pub_id,
 			walked_entries,
+			is_shallow,
 		})
 	}

@@ -91,6 +115,7 @@ impl SerializableTask<Error> for SaveTask {
 				location_id,
 				location_pub_id,
 				walked_entries,
+				is_shallow,
 			}| Self {
 				id,
 				location_id,
@@ -98,6 +123,7 @@ impl SerializableTask<Error> for SaveTask {
 				walked_entries,
 				db,
 				sync,
+				is_shallow,
 			},
 		)
 	}
@@ -115,6 +141,11 @@ impl Task<Error> for SaveTask {
 		self.id
 	}

+	fn with_priority(&self) -> bool {
+		// If we're running in shallow mode, then we want priority
+		self.is_shallow
+	}
+
 	async fn run(&mut self, _: &Interrupter) -> Result<ExecStatus, Error> {
 		use file_path::{
 			create_unchecked, date_created, date_indexed, date_modified, extension, hidden, inode,

@@ -28,11 +28,12 @@ pub struct UpdateTask {
 	object_ids_that_should_be_unlinked: HashSet<object::id::Type>,
 	db: Arc<PrismaClient>,
 	sync: Arc<SyncManager>,
+	is_shallow: bool,
 }

 impl UpdateTask {
 	#[must_use]
-	pub fn new(
+	pub fn new_deep(
 		walked_entries: Vec<WalkedEntry>,
 		db: Arc<PrismaClient>,
 		sync: Arc<SyncManager>,
@@ -43,6 +44,23 @@ impl UpdateTask {
 			db,
 			sync,
 			object_ids_that_should_be_unlinked: HashSet::new(),
+			is_shallow: false,
 		}
 	}
+
+	#[must_use]
+	pub fn new_shallow(
+		walked_entries: Vec<WalkedEntry>,
+		db: Arc<PrismaClient>,
+		sync: Arc<SyncManager>,
+	) -> Self {
+		Self {
+			id: TaskId::new_v4(),
+			walked_entries,
+			db,
+			sync,
+			object_ids_that_should_be_unlinked: HashSet::new(),
+			is_shallow: true,
+		}
+	}
 }
@@ -52,6 +70,7 @@ struct UpdateTaskSaveState {
 	id: TaskId,
 	walked_entries: Vec<WalkedEntry>,
 	object_ids_that_should_be_unlinked: HashSet<object::id::Type>,
+	is_shallow: bool,
 }

 impl SerializableTask<Error> for UpdateTask {
@@ -62,10 +81,19 @@ impl SerializableTask<Error> for UpdateTask {
 	type DeserializeCtx = (Arc<PrismaClient>, Arc<SyncManager>);

 	async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
+		let Self {
+			id,
+			walked_entries,
+			object_ids_that_should_be_unlinked,
+			is_shallow,
+			..
+		} = self;
+
 		rmp_serde::to_vec_named(&UpdateTaskSaveState {
-			id: self.id,
-			walked_entries: self.walked_entries,
-			object_ids_that_should_be_unlinked: self.object_ids_that_should_be_unlinked,
+			id,
+			walked_entries,
+			object_ids_that_should_be_unlinked,
+			is_shallow,
 		})
 	}

@@ -78,12 +106,14 @@ impl SerializableTask<Error> for UpdateTask {
 				id,
 				walked_entries,
 				object_ids_that_should_be_unlinked,
+				is_shallow,
 			}| Self {
 				id,
 				walked_entries,
 				object_ids_that_should_be_unlinked,
 				db,
 				sync,
+				is_shallow,
 			},
 		)
 	}
@@ -101,6 +131,11 @@ impl Task<Error> for UpdateTask {
 		self.id
 	}

+	fn with_priority(&self) -> bool {
+		// If we're running in shallow mode, then we want priority
+		self.is_shallow
+	}
+
 	async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, Error> {
 		use file_path::{
 			cas_id, date_created, date_modified, hidden, inode, is_dir, object, object_id,

@@ -9,8 +9,8 @@ use sd_core_prisma_helpers::{file_path_pub_and_cas_ids, file_path_walker};

 use sd_prisma::prisma::file_path;
 use sd_task_system::{
-	check_interruption, ExecStatus, Interrupter, IntoAnyTaskOutput, SerializableTask, Task,
-	TaskDispatcher, TaskHandle, TaskId,
+	check_interruption, BaseTaskDispatcher, ExecStatus, Interrupter, IntoAnyTaskOutput,
+	SerializableTask, Task, TaskDispatcher, TaskHandle, TaskId,
 };
 use sd_utils::{db::inode_from_db, error::FileIOError};
@@ -239,6 +239,7 @@ struct WalkDirSaveState {
 	stage: WalkerStageSaveState,
 	errors: Vec<NonCriticalJobError>,
 	scan_time: Duration,
+	is_shallow: bool,
 }

 #[derive(Debug, Serialize, Deserialize)]
@@ -351,7 +352,7 @@ impl From<WalkerStageSaveState> for WalkerStage {
 }

 #[derive(Debug)]
-pub struct WalkDirTask<DBProxy, IsoPathFactory, Dispatcher>
+pub struct WalkDirTask<DBProxy, IsoPathFactory, Dispatcher = BaseTaskDispatcher<Error>>
 where
 	DBProxy: WalkerDBProxy,
 	IsoPathFactory: IsoFilePathFactory,
@@ -368,6 +369,7 @@ where
 	maybe_dispatcher: Option<Dispatcher>,
 	errors: Vec<NonCriticalJobError>,
 	scan_time: Duration,
+	is_shallow: bool,
 }

 impl<DBProxy, IsoPathFactory, Dispatcher> WalkDirTask<DBProxy, IsoPathFactory, Dispatcher>
@@ -376,13 +378,13 @@ where
 	IsoPathFactory: IsoFilePathFactory,
 	Dispatcher: TaskDispatcher<Error>,
 {
-	pub fn new(
+	pub fn new_deep(
 		entry: impl Into<ToWalkEntry> + Send,
 		root: Arc<PathBuf>,
 		indexer_ruler: IndexerRuler,
 		iso_file_path_factory: IsoPathFactory,
 		db_proxy: DBProxy,
-		maybe_dispatcher: Option<Dispatcher>,
+		dispatcher: Dispatcher,
 	) -> Result<Self, IndexerError> {
 		let entry = entry.into();
 		Ok(Self {
@@ -394,7 +396,38 @@ where
 			db_proxy,
 			stage: WalkerStage::Start,
 			entry,
-			maybe_dispatcher,
+			maybe_dispatcher: Some(dispatcher),
+			is_shallow: false,
 			errors: Vec::new(),
 			scan_time: Duration::ZERO,
 		})
 	}
 }

+impl<DBProxy, IsoPathFactory> WalkDirTask<DBProxy, IsoPathFactory, BaseTaskDispatcher<Error>>
+where
+	DBProxy: WalkerDBProxy,
+	IsoPathFactory: IsoFilePathFactory,
+{
+	pub fn new_shallow(
+		entry: impl Into<ToWalkEntry> + Send,
+		root: Arc<PathBuf>,
+		indexer_ruler: IndexerRuler,
+		iso_file_path_factory: IsoPathFactory,
+		db_proxy: DBProxy,
+	) -> Result<Self, IndexerError> {
+		let entry = entry.into();
+		Ok(Self {
+			id: TaskId::new_v4(),
+			root,
+			indexer_ruler,
+			entry_iso_file_path: iso_file_path_factory.build(&entry.path, true)?,
+			iso_file_path_factory,
+			db_proxy,
+			stage: WalkerStage::Start,
+			entry,
+			maybe_dispatcher: None,
+			is_shallow: true,
+			errors: Vec::new(),
+			scan_time: Duration::ZERO,
+		})
@@ -413,14 +446,26 @@ where
 	type DeserializeCtx = (IndexerRuler, DBProxy, IsoPathFactory, Dispatcher);

 	async fn serialize(self) -> Result<Vec<u8>, Self::SerializeError> {
+		let Self {
+			id,
+			entry,
+			root,
+			entry_iso_file_path,
+			stage,
+			errors,
+			scan_time,
+			is_shallow,
+			..
+		} = self;
 		rmp_serde::to_vec_named(&WalkDirSaveState {
-			id: self.id,
-			entry: self.entry,
-			root: self.root,
-			entry_iso_file_path: self.entry_iso_file_path,
-			stage: self.stage.into(),
-			errors: self.errors,
-			scan_time: self.scan_time,
+			id,
+			entry,
+			root,
+			entry_iso_file_path,
+			stage: stage.into(),
+			errors,
+			scan_time,
+			is_shallow,
 		})
 	}

@@ -437,6 +482,7 @@ where
 				stage,
 				errors,
 				scan_time,
+				is_shallow,
 			}| Self {
 				id,
 				entry,
@@ -446,9 +492,10 @@ where
 				iso_file_path_factory,
 				db_proxy,
 				stage: stage.into(),
-				maybe_dispatcher: Some(dispatcher),
+				maybe_dispatcher: is_shallow.then_some(dispatcher),
 				errors,
 				scan_time,
+				is_shallow,
 			},
 		)
 	}
@@ -466,6 +513,11 @@ where
 		self.id
 	}

+	fn with_priority(&self) -> bool {
+		// If we're running in shallow mode, then we want priority
+		self.is_shallow
+	}
+
 	#[allow(clippy::too_many_lines)]
 	async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, Error> {
 		let Self {
@@ -747,13 +799,13 @@ async fn keep_walking(
 		to_keep_walking
 			.drain(..)
 			.map(|entry| {
-				WalkDirTask::new(
+				WalkDirTask::new_deep(
 					entry,
 					Arc::clone(root),
 					indexer_ruler.clone(),
 					iso_file_path_factory.clone(),
 					db_proxy.clone(),
-					Some(dispatcher.clone()),
+					dispatcher.clone(),
 				)
 				.map_err(|e| NonCriticalIndexerError::DispatchKeepWalking(e.to_string()))
 			})
@@ -1226,7 +1278,7 @@ mod tests {

 		let handle = system
 			.dispatch(
-				WalkDirTask::new(
+				WalkDirTask::new_deep(
 					root_path.to_path_buf(),
 					Arc::new(root_path.to_path_buf()),
 					indexer_ruler,
@@ -1234,7 +1286,7 @@ mod tests {
 					root_path: Arc::new(root_path.to_path_buf()),
 				},
 				DummyDBProxy,
-				Some(system.get_dispatcher()),
+				system.get_dispatcher(),
 			)
 			.unwrap(),
 		)
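
With `BaseTaskDispatcher<Error>` as the default for the `Dispatcher` type parameter, shallow walks no longer have to name a dispatcher type at all. A sketch of the two construction shapes, assuming walk entries `dir_a` and `dir_b` plus `root`, `ruler`, `iso_factory`, `db_proxy`, and `dispatcher` are in scope (a fragment, not a standalone program):

// Deep walk: holds a dispatcher so keep_walking can spawn child tasks.
let deep = WalkDirTask::new_deep(
    dir_a,
    Arc::clone(&root),
    ruler.clone(),
    iso_factory.clone(),
    db_proxy.clone(),
    dispatcher.clone(),
)?;

// Shallow walk: maybe_dispatcher stays None, so no children are dispatched,
// and with_priority() returns true.
let shallow = WalkDirTask::new_shallow(dir_b, root, ruler, iso_factory, db_proxy)?;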

@@ -25,7 +25,10 @@ use futures_concurrency::{
 use serde::{Deserialize, Serialize};
 use specta::Type;
 use strum::{Display, EnumString};
-use tokio::spawn;
+use tokio::{
+	spawn,
+	sync::{watch, Mutex},
+};
 use tracing::{debug, error, info, warn};
 use uuid::Uuid;
@@ -42,6 +45,7 @@ use super::{
 #[strum(use_phf, serialize_all = "snake_case")]
 pub enum JobName {
 	Indexer,
+	FileIdentifier,
 	// TODO: Add more job names as needed
 }
|
@ -631,7 +635,10 @@ async fn to_spawn_job<Ctx: JobContext>(
|
|||
|
||||
let mut remote_controllers = vec![];
|
||||
|
||||
let (dispatcher, remote_controllers_rx) = JobTaskDispatcher::new(base_dispatcher);
|
||||
let (running_state_tx, running_state_rx) = watch::channel(JobRunningState::Running);
|
||||
|
||||
let (dispatcher, remote_controllers_rx) =
|
||||
JobTaskDispatcher::new(base_dispatcher, running_state_rx);
|
||||
|
||||
if let Some(existing_tasks) = existing_tasks {
|
||||
if let Err(e) = job
|
||||
|
@@ -664,6 +671,7 @@ async fn to_spawn_job<Ctx: JobContext>(

 					match command {
 						Command::Pause => {
+							running_state_tx.send_modify(|state| *state = JobRunningState::Paused);
 							remote_controllers
 								.iter()
 								.map(TaskRemoteController::pause)
@@ -680,6 +688,8 @@ async fn to_spawn_job<Ctx: JobContext>(
 							});
 						}
 						Command::Resume => {
+							running_state_tx.send_modify(|state| *state = JobRunningState::Running);
+
 							remote_controllers
 								.iter()
 								.map(TaskRemoteController::resume)
@@ -726,14 +736,29 @@ async fn to_spawn_job<Ctx: JobContext>(
 	}
 }

+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum JobRunningState {
+	Running,
+	Paused,
+}
+
+impl Default for JobRunningState {
+	fn default() -> Self {
+		Self::Running
+	}
+}
+
 #[derive(Debug, Clone)]
 pub struct JobTaskDispatcher {
 	dispatcher: BaseTaskDispatcher<Error>,
 	remote_controllers_tx: chan::Sender<TaskRemoteController>,
+	running_state: Arc<Mutex<watch::Receiver<JobRunningState>>>,
 }

 impl TaskDispatcher<Error> for JobTaskDispatcher {
 	async fn dispatch_boxed(&self, boxed_task: Box<dyn Task<Error>>) -> TaskHandle<Error> {
+		self.wait_for_dispatch_approval().await;
+
 		let handle = self.dispatcher.dispatch_boxed(boxed_task).await;

 		self.remote_controllers_tx
@@ -748,14 +773,9 @@ impl TaskDispatcher<Error> for JobTaskDispatcher {
 		&self,
 		boxed_tasks: impl IntoIterator<Item = Box<dyn Task<Error>>> + Send,
 	) -> Vec<TaskHandle<Error>> {
-		let handles = self.dispatcher.dispatch_many_boxed(boxed_tasks).await;
+		self.wait_for_dispatch_approval().await;

-		for handle in &handles {
-			self.remote_controllers_tx
-				.send(handle.remote_controller())
-				.await
-				.expect("remote controllers tx closed");
-		}
+		let handles = self.dispatcher.dispatch_many_boxed(boxed_tasks).await;

 		handles
 			.iter()
@@ -770,15 +790,28 @@ impl TaskDispatcher<Error> for JobTaskDispatcher {
 }

 impl JobTaskDispatcher {
-	fn new(dispatcher: BaseTaskDispatcher<Error>) -> (Self, chan::Receiver<TaskRemoteController>) {
+	fn new(
+		dispatcher: BaseTaskDispatcher<Error>,
+		running_state_rx: watch::Receiver<JobRunningState>,
+	) -> (Self, chan::Receiver<TaskRemoteController>) {
 		let (remote_controllers_tx, remote_controllers_rx) = chan::unbounded();

 		(
 			Self {
 				dispatcher,
 				remote_controllers_tx,
+				running_state: Arc::new(Mutex::new(running_state_rx)),
 			},
 			remote_controllers_rx,
 		)
 	}
+
+	async fn wait_for_dispatch_approval(&self) {
+		self.running_state
+			.lock()
+			.await
+			.wait_for(|state| *state == JobRunningState::Running)
+			.await
+			.expect("job running state watch channel unexpectedly closed");
+	}
 }
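
The pause machinery above turns `Command::Pause` into dispatch backpressure: every dispatch first parks in `wait_for_dispatch_approval` until the watch channel reads `Running` again. The same gating pattern reduced to plain tokio, runnable on its own (all names here are illustrative, not from the codebase):

use tokio::sync::watch;

#[derive(Clone, Copy, PartialEq, Eq)]
enum State {
    Running,
    Paused,
}

#[tokio::main]
async fn main() {
    // Start paused, like a job that has received Command::Pause.
    let (tx, mut gate) = watch::channel(State::Paused);

    let gated_dispatch = tokio::spawn(async move {
        // Mirrors wait_for_dispatch_approval: block until the state is Running.
        gate.wait_for(|state| *state == State::Running).await.unwrap();
        "dispatched"
    });

    // Command::Resume equivalent: flip the state, waking every parked waiter.
    tx.send_modify(|state| *state = State::Running);

    assert_eq!(gated_dispatch.await.unwrap(), "dispatched");
}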

@@ -1,4 +1,4 @@
-use crate::indexer::IndexerJob;
+use crate::{file_identifier::FileIdentifierJob, indexer::IndexerJob};

 use sd_prisma::prisma::{job, location};
 use sd_utils::uuid_to_bytes;
@@ -212,6 +212,7 @@ async fn load_job<Ctx: JobContext>(
 			Ctx,
 			[
 				IndexerJob,
+				FileIdentifierJob,
 				// TODO: Add more jobs here
 				// e.g.: FileIdentifierJob, MediaProcessorJob, etc.,
 			]

@@ -33,9 +33,12 @@ use serde::{Deserialize, Serialize};
 use specta::Type;
 use thiserror::Error;

+pub mod file_identifier;
 pub mod indexer;
 pub mod job_system;
+pub mod utils;

+use file_identifier::{FileIdentifierError, NonCriticalFileIdentifierError};
 use indexer::{IndexerError, NonCriticalIndexerError};

 pub use job_system::{
@@ -47,6 +50,8 @@ pub use job_system::{
 pub enum Error {
 	#[error(transparent)]
 	Indexer(#[from] IndexerError),
+	#[error(transparent)]
+	FileIdentifier(#[from] FileIdentifierError),

 	#[error(transparent)]
 	TaskSystem(#[from] TaskSystemError),
@@ -56,6 +61,7 @@ impl From<Error> for rspc::Error {
 	fn from(e: Error) -> Self {
 		match e {
 			Error::Indexer(e) => e.into(),
+			Error::FileIdentifier(e) => e.into(),
 			Error::TaskSystem(e) => {
 				Self::with_cause(rspc::ErrorCode::InternalServerError, e.to_string(), e)
 			}
@@ -68,4 +74,15 @@ pub enum NonCriticalJobError {
 	// TODO: Add variants as needed
 	#[error(transparent)]
 	Indexer(#[from] NonCriticalIndexerError),
+	#[error(transparent)]
+	FileIdentifier(#[from] NonCriticalFileIdentifierError),
 }
+
+#[repr(i32)]
+#[derive(Debug, Clone, Copy, Serialize, Deserialize, Type, Eq, PartialEq)]
+pub enum LocationScanState {
+	Pending = 0,
+	Indexed = 1,
+	FilesIdentified = 2,
+	Completed = 3,
+}

core/crates/heavy-lifting/src/utils/mod.rs (new file)
@@ -0,0 +1 @@
pub mod sub_path;

core/crates/heavy-lifting/src/utils/sub_path.rs (new file)
@@ -0,0 +1,93 @@
use rspc::ErrorCode;
use sd_core_file_path_helper::{
    ensure_file_path_exists, ensure_sub_path_is_directory, ensure_sub_path_is_in_location,
    FilePathError, IsolatedFilePathData,
};

use sd_prisma::prisma::{location, PrismaClient};

use std::path::{Path, PathBuf};

use prisma_client_rust::QueryError;

#[derive(thiserror::Error, Debug)]
pub enum SubPathError {
    #[error("received sub path not in database: <path='{}'>", .0.display())]
    SubPathNotFound(Box<Path>),

    #[error("database error: {0}")]
    Database(#[from] QueryError),

    #[error(transparent)]
    IsoFilePath(#[from] FilePathError),
}

impl From<SubPathError> for rspc::Error {
    fn from(err: SubPathError) -> Self {
        match err {
            SubPathError::SubPathNotFound(_) => {
                Self::with_cause(ErrorCode::NotFound, err.to_string(), err)
            }

            _ => Self::with_cause(ErrorCode::InternalServerError, err.to_string(), err),
        }
    }
}

pub async fn get_full_path_from_sub_path(
    location_id: location::id::Type,
    sub_path: &Option<impl AsRef<Path> + Send + Sync>,
    location_path: impl AsRef<Path> + Send,
    db: &PrismaClient,
) -> Result<PathBuf, SubPathError> {
    let location_path = location_path.as_ref();

    match sub_path {
        Some(sub_path) if sub_path.as_ref() != Path::new("") => {
            let sub_path = sub_path.as_ref();
            let full_path = ensure_sub_path_is_in_location(location_path, sub_path).await?;

            ensure_sub_path_is_directory(location_path, sub_path).await?;

            ensure_file_path_exists(
                sub_path,
                &IsolatedFilePathData::new(location_id, location_path, &full_path, true)?,
                db,
                SubPathError::SubPathNotFound,
            )
            .await?;

            Ok(full_path)
        }
        _ => Ok(location_path.to_path_buf()),
    }
}

pub async fn maybe_get_iso_file_path_from_sub_path(
    location_id: location::id::Type,
    sub_path: &Option<impl AsRef<Path> + Send + Sync>,
    location_path: impl AsRef<Path> + Send,
    db: &PrismaClient,
) -> Result<Option<IsolatedFilePathData<'static>>, SubPathError> {
    let location_path = location_path.as_ref();

    match sub_path {
        Some(sub_path) if sub_path.as_ref() != Path::new("") => {
            let full_path = ensure_sub_path_is_in_location(location_path, sub_path).await?;
            ensure_sub_path_is_directory(location_path, sub_path).await?;

            let sub_iso_file_path =
                IsolatedFilePathData::new(location_id, location_path, &full_path, true)?;

            ensure_file_path_exists(
                sub_path,
                &sub_iso_file_path,
                db,
                SubPathError::SubPathNotFound,
            )
            .await
            .map(|()| Some(sub_iso_file_path))
        }
        _ => Ok(None),
    }
}
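
Both helpers treat a missing or empty sub path the same way, falling back to the location root; anything else must be inside the location, be a directory, and exist in the database before it is returned. A usage sketch, assuming `location_id`, a `location_path: PathBuf`, and a `db` client are in scope (a fragment, not a standalone program):

// A real sub path is validated against the location and the database...
let docs =
    get_full_path_from_sub_path(location_id, &Some("Documents"), &location_path, &db).await?;

// ...while None (or "") short-circuits to the location root.
let root = get_full_path_from_sub_path(location_id, &None::<&str>, &location_path, &db).await?;
assert_eq!(root, location_path);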

@@ -30,6 +30,7 @@
 use sd_prisma::prisma::{self, file_path, job, label, location, object};

 // File Path selectables!
+file_path::select!(file_path_pub_id { pub_id });
 file_path::select!(file_path_pub_and_cas_ids { id pub_id cas_id });
 file_path::select!(file_path_just_pub_id_materialized_path {
 	pub_id

@@ -1,4 +1,5 @@
 use std::{
+	collections::BTreeMap,
 	ops::Deref,
 	sync::{atomic::Ordering, Arc},
 };
@@ -7,7 +8,10 @@ use sd_prisma::{
 	prisma::{crdt_operation, SortOrder},
 	prisma_sync::ModelSyncData,
 };
-use sd_sync::{CRDTOperation, OperationKind};
+use sd_sync::{
+	CRDTOperation, CRDTOperationData, CompressedCRDTOperation, CompressedCRDTOperations,
+	OperationKind,
+};
 use tokio::sync::{mpsc, oneshot, Mutex};
 use tracing::debug;
 use uhlc::{Timestamp, NTP64};
@@ -106,16 +110,20 @@ impl Actor {
 				}
 			}
 			State::Ingesting(event) => {
-				if !event.messages.is_empty() {
-					debug!(
-						"ingesting {} operations: {} to {}",
-						event.messages.len(),
-						event.messages.first().unwrap().timestamp.as_u64(),
-						event.messages.last().unwrap().timestamp.as_u64(),
-					);
-
-					for op in event.messages {
-						self.receive_crdt_operation(op).await;
-					}
-				}
+				debug!(
+					"ingesting {} operations: {} to {}",
+					event.messages.len(),
+					event.messages.first().unwrap().3.timestamp.as_u64(),
+					event.messages.last().unwrap().3.timestamp.as_u64(),
+				);
+
+				for (instance, data) in event.messages.0 {
+					for (model, data) in data {
+						for (record, ops) in data {
+							self.receive_crdt_operations(instance, model, record, ops)
+								.await
+								.expect("sync ingest failed");
+						}
+					}
+				}
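
The ingest loop above now walks a nested instance -> model -> record grouping and hands each record's whole run of `CompressedCRDTOperation`s to `receive_crdt_operations` in one call, which is what lets the conflict resolution below reason about a full batch at once. A standalone toy of that grouping-then-sorting shape, with hypothetical stand-in types (none of these names come from the codebase):

use std::collections::BTreeMap;

// Stand-ins for the sync types: per-record lists of operation timestamps,
// keyed by instance and model, mirroring the shape of event.messages.0.
type Instance = u8;
type Model = u16;
type RecordId = String;

fn main() {
    let mut grouped: BTreeMap<Instance, BTreeMap<Model, BTreeMap<RecordId, Vec<u64>>>> =
        BTreeMap::new();

    // Group incoming (instance, model, record, timestamp) tuples, as the ingester does.
    for (instance, model, record, ts) in [(1, 3, "a".to_string(), 10), (1, 3, "a".to_string(), 7)] {
        grouped
            .entry(instance)
            .or_default()
            .entry(model)
            .or_default()
            .entry(record)
            .or_default()
            .push(ts);
    }

    // Each record's ops get sorted before conflict resolution, as in receive_crdt_operations.
    for records in grouped.values_mut().flat_map(|models| models.values_mut()) {
        records.sort_unstable();
    }

    assert_eq!(grouped[&1][&3]["a"], vec![7, 10]);
}
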
@ -161,105 +169,245 @@ impl Actor {
|
|||
}
|
||||
|
||||
// where the magic happens
|
||||
async fn receive_crdt_operation(
|
||||
async fn receive_crdt_operations(
|
||||
&mut self,
|
||||
mut op: CRDTOperation,
|
||||
instance: Uuid,
|
||||
model: u16,
|
||||
record_id: rmpv::Value,
|
||||
mut ops: Vec<CompressedCRDTOperation>,
|
||||
) -> prisma_client_rust::Result<()> {
|
||||
let db = &self.db;
|
||||
|
||||
ops.sort_by_key(|op| op.timestamp);
|
||||
|
||||
let new_timestamp = ops.last().expect("Empty ops array").timestamp;
|
||||
|
||||
// first, we update the HLC's timestamp with the incoming one.
|
||||
// this involves a drift check + sets the last time of the clock
|
||||
self.clock
|
||||
.update_with_timestamp(&Timestamp::new(op.timestamp, op.instance.into()))
|
||||
.update_with_timestamp(&Timestamp::new(new_timestamp, instance.into()))
|
||||
.expect("timestamp has too much drift!");
|
||||
|
||||
// read the timestamp for the operation's instance, or insert one if it doesn't exist
let timestamp = self.timestamps.read().await.get(&op.instance).cloned();
let timestamp = self.timestamps.read().await.get(&instance).cloned();

// copy some fields bc rust ownership
let op_instance = op.instance;
let op_timestamp = op.timestamp;

// resolve conflicts
// this can be outside the transaction as there's only ever one ingester
match &mut op.data {
// don't apply Create operations if the record has been deleted
sd_sync::CRDTOperationData::Create(_) => {
let delete = db
.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(op.model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&op.record_id).unwrap(),
),
crdt_operation::kind::equals(OperationKind::Delete.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
.exec()
.await?;

if delete.is_some() {
return Ok(());
}
}
// don't apply Update operations if the record hasn't been created, or a newer Update for the same field has been applied
sd_sync::CRDTOperationData::Update { field, .. } => {
let (create, update) = db
._batch((
db.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(op.model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&op.record_id).unwrap(),
),
crdt_operation::kind::equals(OperationKind::Create.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)),
db.crdt_operation()
.find_first(vec![
crdt_operation::timestamp::gt(op.timestamp.as_u64() as i64),
crdt_operation::model::equals(op.model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&op.record_id).unwrap(),
),
crdt_operation::kind::equals(
OperationKind::Update(field).to_string(),
),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)),
))
.await?;

// we don't care about the contents of the create operation, just that it exists
// - all update operations come after creates, no check is necessary
if create.is_none() || update.is_some() {
return Ok(());
}
}
// Delete - ignores all other messages
if let Some(delete_op) = ops
.iter()
.rev()
.find(|op| matches!(op.data, sd_sync::CRDTOperationData::Delete))
{
// deletes are the be all and end all, no need to check anything
sd_sync::CRDTOperationData::Delete => {}
};

// we don't want these writes to not apply together!
self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// apply the operation to the actual record
ModelSyncData::from_op(op.clone())
let op = CRDTOperation {
instance,
model,
record_id,
timestamp: delete_op.timestamp,
data: CRDTOperationData::Delete,
};

self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
ModelSyncData::from_op(op.clone())
.unwrap()
.exec(&db)
.await?;
write_crdt_op_to_db(&op, &db).await?;

Ok(())
})
.await?;
}
// Create + > 0 Update - overwrites the create's data with the updates
else if let Some(timestamp) = ops.iter().rev().find_map(|op| {
if let sd_sync::CRDTOperationData::Create(_) = &op.data {
return Some(op.timestamp);
}

None
}) {
// conflict resolution
let delete = db
.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(rmp_serde::to_vec(&record_id).unwrap()),
crdt_operation::kind::equals(OperationKind::Delete.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
.exec()
.await?;

if delete.is_some() {
return Ok(());
}

let mut data = BTreeMap::new();

let mut applied_ops = vec![];

// search for all Updates until a Create is found
for op in ops.iter().rev() {
match &op.data {
CRDTOperationData::Delete => unreachable!("Delete can't exist here!"),
CRDTOperationData::Create(create_data) => {
for (k, v) in create_data {
data.entry(k).or_insert(v);
}

applied_ops.push(op);

break;
}
CRDTOperationData::Update { field, value } => {
applied_ops.push(op);
data.insert(field, value);
}
}
}

self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake a create with a bunch of data rather than individual insert
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Create(
data.into_iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect(),
),
})
.unwrap()
.exec(&db)
.await
.unwrap();
.await?;

// write the operation to the operations table
write_crdt_op_to_db(&op, &db).await
})
.await?;
for op in applied_ops {
write_crdt_op_to_db(
&CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp: op.timestamp,
data: op.data.clone(),
},
&db,
)
.await?;
}

Ok(())
})
.await?;
}
// > 0 Update - batches updates with a fake Create op
else {
let mut data = BTreeMap::new();

for op in ops.into_iter().rev() {
let CRDTOperationData::Update { field, value } = op.data else {
unreachable!("Create + Delete should be filtered out!");
};

data.insert(field, (value, op.timestamp));
}

// conflict resolution
let (create, updates) = db
._batch((
db.crdt_operation()
.find_first(vec![
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&record_id).unwrap(),
),
crdt_operation::kind::equals(OperationKind::Create.to_string()),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc)),
data.iter()
.map(|(k, (_, timestamp))| {
db.crdt_operation()
.find_first(vec![
crdt_operation::timestamp::gt(timestamp.as_u64() as i64),
crdt_operation::model::equals(model as i32),
crdt_operation::record_id::equals(
rmp_serde::to_vec(&record_id).unwrap(),
),
crdt_operation::kind::equals(
OperationKind::Update(k).to_string(),
),
])
.order_by(crdt_operation::timestamp::order(SortOrder::Desc))
})
.collect::<Vec<_>>(),
))
.await?;

if create.is_none() {
return Ok(());
}

// does the same thing as processing ops one-by-one and returning early if a newer op was found
for (update, key) in updates
.into_iter()
.zip(data.keys().cloned().collect::<Vec<_>>())
{
if update.is_some() {
data.remove(&key);
}
}

self.db
._transaction()
.with_timeout(30 * 1000)
.run(|db| async move {
// fake operation to batch them all at once
ModelSyncData::from_op(CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp: NTP64(0),
data: CRDTOperationData::Create(
data.iter()
.map(|(k, (data, _))| (k.to_string(), data.clone()))
.collect(),
),
})
.unwrap()
.exec(&db)
.await?;

// need to only apply ops that haven't been filtered out
for (field, (value, timestamp)) in data {
write_crdt_op_to_db(
&CRDTOperation {
instance,
model,
record_id: record_id.clone(),
timestamp,
data: CRDTOperationData::Update { field, value },
},
&db,
)
.await?;
}

Ok(())
})
.await?;
}

// update the stored timestamp for this instance - will be derived from the crdt operations table on restart
let new_ts = NTP64::max(timestamp.unwrap_or_default(), op_timestamp);
self.timestamps.write().await.insert(op_instance, new_ts);
let new_ts = NTP64::max(timestamp.unwrap_or_default(), new_timestamp);

self.timestamps.write().await.insert(instance, new_ts);

self.io.req_tx.send(Request::Ingested).await.ok();

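The three branches above reduce each compressed batch to a single effective write. A condensed, standalone model of that decision (illustrative sketch; `Op` and `Plan` are stand-ins, not the sd_sync types):

#[derive(Clone, Debug)]
enum Op {
    Create,
    Update { field: String },
    Delete,
}

#[derive(Debug, PartialEq)]
enum Plan {
    DeleteWins,    // a Delete supersedes everything else in the batch
    BatchedCreate, // fold the Create and its trailing Updates into one Create
    UpdatesOnly,   // only valid if a Create already exists in the database
}

// The batch is sorted oldest-to-newest before this runs, mirroring the sort above.
fn plan(batch: &[Op]) -> Plan {
    if batch.iter().any(|op| matches!(op, Op::Delete)) {
        Plan::DeleteWins
    } else if batch.iter().any(|op| matches!(op, Op::Create)) {
        Plan::BatchedCreate
    } else {
        Plan::UpdatesOnly
    }
}

fn main() {
    let batch = vec![Op::Create, Op::Update { field: "name".into() }];
    assert_eq!(plan(&batch), Plan::BatchedCreate);
}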
@ -283,7 +431,7 @@ pub struct Handler {
#[derive(Debug)]
pub struct MessagesEvent {
pub instance_id: Uuid,
pub messages: Vec<CRDTOperation>,
pub messages: CompressedCRDTOperations,
pub has_more: bool,
}
@ -104,11 +104,13 @@ impl Manager {
))
.await?;

self.shared
.timestamps
.write()
.await
.insert(self.instance, ops.last().unwrap().timestamp);
if let Some(last) = ops.last() {
self.shared
.timestamps
.write()
.await
.insert(self.instance, last.timestamp);
}

self.tx.send(SyncMessage::Created).ok();
@ -1,5 +1,6 @@
use sd_core_sync::*;
use sd_prisma::prisma;
use sd_sync::CompressedCRDTOperations;
use sd_utils::uuid_to_bytes;

use prisma_client_rust::chrono::Utc;
@ -122,7 +123,7 @@ impl Instance {
ingest
.event_tx
.send(ingest::Event::Messages(ingest::MessagesEvent {
messages,
messages: CompressedCRDTOperations::new(messages),
has_more: false,
instance_id: instance1.id,
}))
@ -561,7 +561,7 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
);

#[cfg(not(any(target_os = "ios", target_os = "android")))]
trash::delete(&full_path).unwrap();
trash::delete(full_path).unwrap();

Ok(())
}
@ -343,7 +343,6 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
R.with2(library())
.subscription(|(node, _), _: ()| async move {
// TODO: Only return event for the library that was subscribed to

let mut event_bus_rx = node.event_bus.0.subscribe();
async_stream::stream! {
while let Ok(event) = event_bus_rx.recv().await {
@ -355,4 +354,19 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
}
})
})
.procedure("newFilePathIdentified", {
R.with2(library())
.subscription(|(node, _), _: ()| async move {
// TODO: Only return event for the library that was subscribed to
let mut event_bus_rx = node.event_bus.0.subscribe();
async_stream::stream! {
while let Ok(event) = event_bus_rx.recv().await {
match event {
CoreEvent::NewIdentifiedObjects { file_path_ids } => yield file_path_ids,
_ => {}
}
}
}
})
})
}
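The `newFilePathIdentified` procedure above follows a common pattern: subscribe to a broadcast bus and re-yield a single event variant as a stream. A minimal standalone version of that pattern (tokio + async-stream + futures assumed as dependencies; the event type is a toy):

use async_stream::stream;
use futures::StreamExt;
use tokio::sync::broadcast;

#[derive(Clone, Debug)]
enum Event {
    NewIdentifiedObjects { ids: Vec<i32> },
    Other,
}

#[tokio::main]
async fn main() {
    let (tx, _) = broadcast::channel(16);
    let mut rx = tx.subscribe();

    tx.send(Event::Other).unwrap();
    tx.send(Event::NewIdentifiedObjects { ids: vec![1, 2] }).unwrap();
    drop(tx); // close the bus so the stream below terminates

    // Filter the bus down to one variant, exactly like the subscription above.
    let ids_stream = stream! {
        while let Ok(event) = rx.recv().await {
            if let Event::NewIdentifiedObjects { ids } = event {
                yield ids;
            }
        }
    };
    futures::pin_mut!(ids_stream);
    while let Some(ids) = ids_stream.next().await {
        println!("{ids:?}");
    }
}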
@ -2,8 +2,8 @@ use crate::{
invalidate_query,
location::{
delete_location, find_location, indexer::OldIndexerJobInit, light_scan_location,
relink_location, scan_location, scan_location_sub_path, LocationCreateArgs, LocationError,
LocationUpdateArgs, ScanState,
non_indexed::NonIndexedPathItem, relink_location, scan_location, scan_location_sub_path,
LocationCreateArgs, LocationError, LocationUpdateArgs, ScanState,
},
object::old_file_identifier::old_file_identifier_job::OldFileIdentifierJobInit,
old_job::StatefulJob,
@ -17,7 +17,6 @@ use sd_core_prisma_helpers::{
};

use sd_cache::{CacheNode, Model, Normalise, NormalisedResult, NormalisedResults, Reference};
use sd_indexer::NonIndexedPathItem;
use sd_prisma::prisma::{file_path, indexer_rule, indexer_rules_in_location, location, SortOrder};

use std::path::{Path, PathBuf};
@ -39,20 +38,26 @@ pub type ThumbnailKey = Vec<String>;
#[serde(tag = "type")]
pub enum ExplorerItem {
Path {
// provide the frontend with the thumbnail key explicitly
thumbnail: Option<ThumbnailKey>,
// this tells the frontend if a thumbnail actually exists or not
has_created_thumbnail: bool,
// we can't actually modify data from PCR types, thats why computed properties are used on ExplorerItem
item: file_path_with_object::Data,
},
Object {
thumbnail: Option<ThumbnailKey>,
has_created_thumbnail: bool,
item: object_with_file_paths::Data,
},
Location {
item: location::Data,
},
NonIndexedPath {
thumbnail: Option<ThumbnailKey>,
has_created_thumbnail: bool,
item: NonIndexedPathItem,
},
Location {
item: location::Data,
},
SpacedropPeer {
item: PeerMetadata,
},
@ -1,16 +1,17 @@
use crate::{
invalidate_query,
node::{
config::{NodeConfig, NodePreferences, P2PDiscoveryState, Port},
config::{NodeConfig, NodeConfigP2P, NodePreferences},
get_hardware_model_name, HardwareModel,
},
old_job::JobProgressEvent,
p2p::{into_listener2, Listener2},
Node,
};

use sd_cache::patch_typedef;
use sd_p2p::RemoteIdentity;
use sd_prisma::prisma::file_path;

use std::sync::{atomic::Ordering, Arc};

use itertools::Itertools;
@ -53,7 +54,12 @@ pub type Router = rspc::Router<Ctx>;
/// Represents an internal core event, these are exposed to client via a rspc subscription.
#[derive(Debug, Clone, Serialize, Type)]
pub enum CoreEvent {
NewThumbnail { thumb_key: Vec<String> },
NewThumbnail {
thumb_key: Vec<String>,
},
NewIdentifiedObjects {
file_path_ids: Vec<file_path::id::Type>,
},
JobProgress(JobProgressEvent),
InvalidateOperation(InvalidateOperationEvent),
}
@ -64,16 +70,12 @@ pub enum CoreEvent {
#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Type)]
#[serde(rename_all = "camelCase")]
pub enum BackendFeature {
FilesOverP2P,
CloudSync,
}

impl BackendFeature {
pub fn restore(&self, node: &Node) {
match self {
BackendFeature::FilesOverP2P => {
node.files_over_p2p_flag.store(true, Ordering::Relaxed);
}
BackendFeature::CloudSync => {
node.cloud_sync_flag.store(true, Ordering::Relaxed);
}
@ -89,9 +91,7 @@ pub struct SanitisedNodeConfig {
/// name is the display name of the current node. This is set by the user and is shown in the UI. // TODO: Length validation so it can fit in DNS record
pub name: String,
pub identity: RemoteIdentity,
pub p2p_ipv4_port: Port,
pub p2p_ipv6_port: Port,
pub p2p_discovery: P2PDiscoveryState,
pub p2p: NodeConfigP2P,
pub features: Vec<BackendFeature>,
pub preferences: NodePreferences,
pub image_labeler_version: Option<String>,
@ -103,9 +103,7 @@ impl From<NodeConfig> for SanitisedNodeConfig {
id: value.id,
name: value.name,
identity: value.identity.to_remote_identity(),
p2p_ipv4_port: value.p2p_ipv4_port,
p2p_ipv6_port: value.p2p_ipv6_port,
p2p_discovery: value.p2p_discovery,
p2p: value.p2p,
features: value.features,
preferences: value.preferences,
image_labeler_version: value.image_labeler_version,
@ -118,7 +116,6 @@ struct NodeState {
#[serde(flatten)]
config: SanitisedNodeConfig,
data_path: String,
listeners: Vec<Listener2>,
device_model: Option<String>,
}

@ -154,7 +151,6 @@ pub(crate) fn mount() -> Arc<Router> {
.to_str()
.expect("Found non-UTF-8 path")
.to_string(),
listeners: into_listener2(&node.p2p.p2p.listeners()),
device_model: Some(device_model),
})
})
@ -181,9 +177,6 @@ pub(crate) fn mount() -> Arc<Router> {
.map_err(|err| rspc::Error::new(ErrorCode::InternalServerError, err.to_string()))?;

match feature {
BackendFeature::FilesOverP2P => {
node.files_over_p2p_flag.store(enabled, Ordering::Relaxed);
}
BackendFeature::CloudSync => {
node.cloud_sync_flag.store(enabled, Ordering::Relaxed);
}
@ -19,9 +19,11 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
#[derive(Deserialize, Type)]
pub struct ChangeNodeNameArgs {
pub name: Option<String>,
pub p2p_ipv4_port: Option<Port>,
pub p2p_ipv6_port: Option<Port>,
pub p2p_port: Option<Port>,
pub p2p_ipv4_enabled: Option<bool>,
pub p2p_ipv6_enabled: Option<bool>,
pub p2p_discovery: Option<P2PDiscoveryState>,
pub p2p_remote_access: Option<bool>,
pub image_labeler_version: Option<String>,
}
R.mutation(|node, args: ChangeNodeNameArgs| async move {
@ -43,14 +45,20 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
config.name = name;
}

if let Some(port) = args.p2p_ipv4_port {
config.p2p_ipv4_port = port;
if let Some(port) = args.p2p_port {
config.p2p.port = port;
};
if let Some(port) = args.p2p_ipv6_port {
config.p2p_ipv6_port = port;
if let Some(enabled) = args.p2p_ipv4_enabled {
config.p2p.ipv4 = enabled;
};
if let Some(v) = args.p2p_discovery {
config.p2p_discovery = v;
if let Some(enabled) = args.p2p_ipv6_enabled {
config.p2p.ipv6 = enabled;
};
if let Some(discovery) = args.p2p_discovery {
config.p2p.discovery = discovery;
};
if let Some(remote_access) = args.p2p_remote_access {
config.p2p.remote_access = remote_access;
};

#[cfg(feature = "ai")]
@ -3,9 +3,9 @@ use crate::p2p::{operations, ConnectionMethod, DiscoveryMethod, Header, P2PEvent
use sd_p2p::{PeerConnectionCandidate, RemoteIdentity};

use rspc::{alpha::AlphaRouter, ErrorCode};
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use specta::Type;
use std::path::PathBuf;
use std::{path::PathBuf, sync::PoisonError};
use tokio::io::AsyncWriteExt;
use uuid::Uuid;

@ -58,6 +58,54 @@ pub(crate) fn mount() -> AlphaRouter<Ctx> {
.procedure("state", {
R.query(|node, _: ()| async move { Ok(node.p2p.state().await) })
})
.procedure("listeners", {
#[derive(Serialize, Type)]
#[serde(tag = "type")]
pub enum ListenerState {
Listening,
Error { error: String },
Disabled,
}

#[derive(Serialize, Type)]
pub struct Listeners {
ipv4: ListenerState,
ipv6: ListenerState,
}

R.query(|node, _: ()| async move {
let addrs = node
.p2p
.p2p
.listeners()
.iter()
.flat_map(|l| l.addrs.clone())
.collect::<Vec<_>>();

let errors = node
.p2p
.listener_errors
.lock()
.unwrap_or_else(PoisonError::into_inner);

Ok(Listeners {
ipv4: match errors.ipv4 {
Some(ref err) => ListenerState::Error { error: err.clone() },
None => match addrs.iter().any(|f| f.is_ipv4()) {
true => ListenerState::Listening,
false => ListenerState::Disabled,
},
},
ipv6: match errors.ipv6 {
Some(ref err) => ListenerState::Error { error: err.clone() },
None => match addrs.iter().any(|f| f.is_ipv6()) {
true => ListenerState::Listening,
false => ListenerState::Disabled,
},
},
})
})
})
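One detail worth noting in the listener query above: `unwrap_or_else(PoisonError::into_inner)` recovers the guard from a poisoned `Mutex` instead of panicking. A tiny std-only illustration of that idiom:

use std::sync::{Mutex, PoisonError};

fn main() {
    let errors: Mutex<Option<String>> = Mutex::new(None);
    // lock() yields Result<MutexGuard, PoisonError<MutexGuard>>; into_inner
    // hands back the guard even if a previous holder panicked mid-update.
    let mut guard = errors.lock().unwrap_or_else(PoisonError::into_inner);
    *guard = Some("ipv4 bind failed".into());
}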
.procedure("debugConnect", {
|
||||
R.mutation(|node, identity: RemoteIdentity| async move {
|
||||
let peer = { node.p2p.p2p.peers().get(&identity).cloned() };
|
||||
|
|
|
@ -1,34 +1,24 @@
use std::{collections::HashMap, path::PathBuf};

use crate::{
api::{locations::ExplorerItem, utils::library},
library::Library,
location::LocationError,
object::{
cas::generate_cas_id,
media::old_thumbnail::{
get_ephemeral_thumb_key, get_indexed_thumb_key, BatchToProcess, GenerateThumbnailArgs,
},
},
location::{non_indexed, LocationError},
object::media::old_thumbnail::get_indexed_thumb_key,
util::{unsafe_streamed_query, BatchedStream},
};

use opendal::{services::Fs, Operator};
use sd_core_prisma_helpers::{file_path_with_object, object_with_file_paths};

use sd_cache::{CacheNode, Model, Normalise, Reference};
use sd_core_indexer_rules::seed::{no_hidden, no_os_protected};
use sd_core_indexer_rules::IndexerRule;
use sd_core_prisma_helpers::{file_path_with_object, object_with_file_paths};
use sd_file_ext::kind::ObjectKind;
use sd_prisma::prisma::{self, location, PrismaClient};
use sd_utils::chain_optional_iter;
use sd_prisma::prisma::{self, PrismaClient};

use std::path::PathBuf;

use async_stream::stream;
use futures::StreamExt;
use itertools::Either;
use rspc::{alpha::AlphaRouter, ErrorCode};
use serde::{Deserialize, Serialize};
use specta::Type;
use tracing::{error, warn};

pub mod file_path;
pub mod media_data;
@ -69,170 +59,98 @@ impl SearchFilterArgs {
file_path: &mut Vec<prisma::file_path::WhereParam>,
object: &mut Vec<prisma::object::WhereParam>,
) -> Result<(), rspc::Error> {
Ok(match self {
match self {
Self::FilePath(v) => file_path.extend(v.into_params(db).await?),
Self::Object(v) => object.extend(v.into_params()),
})
};
Ok(())
}
}

pub fn mount() -> AlphaRouter<Ctx> {
R.router()
.procedure("ephemeralPaths", {
#[derive(Deserialize, Type, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
enum PathFrom {
Path,
// TODO: FTP + S3 + GDrive
#[derive(Serialize, Deserialize, Type, Debug, Clone)]
#[serde(rename_all = "camelCase", tag = "field", content = "value")]
enum EphemeralPathOrder {
Name(SortOrder),
SizeInBytes(SortOrder),
DateCreated(SortOrder),
DateModified(SortOrder),
}

#[derive(Deserialize, Type, Debug)]
#[serde(rename_all = "camelCase")]
struct EphemeralPathSearchArgs {
from: PathFrom,
path: String,
path: PathBuf,
with_hidden_files: bool,
#[specta(optional)]
order: Option<EphemeralPathOrder>,
}

#[derive(Serialize, Type, Debug)]
struct EphemeralPathsResultItem {
pub entries: Vec<Reference<ExplorerItem>>,
pub errors: Vec<String>,
pub errors: Vec<rspc::Error>,
pub nodes: Vec<CacheNode>,
}

R.with2(library()).subscription(
|(node, library),
EphemeralPathSearchArgs {
from,
mut path,
path,
with_hidden_files,
order,
}| async move {
let service = match from {
PathFrom::Path => {
let mut fs = Fs::default();
fs.root("/");
Operator::new(fs)
.map_err(|err| {
rspc::Error::new(
ErrorCode::InternalServerError,
err.to_string(),
)
})?
.finish()
}
};
let paths =
non_indexed::walk(path, with_hidden_files, node, library, |entries| {
macro_rules! order_match {
($order:ident, [$(($variant:ident, |$i:ident| $func:expr)),+]) => {{
match $order {
$(EphemeralPathOrder::$variant(order) => {
entries.sort_unstable_by(|path1, path2| {
let func = |$i: &non_indexed::Entry| $func;

let rules = chain_optional_iter(
[IndexerRule::from(no_os_protected())],
[(!with_hidden_files).then(|| IndexerRule::from(no_hidden()))],
);
let one = func(path1);
let two = func(path2);

// OpenDAL is specific about paths (and the rest of Spacedrive is not)
if !path.ends_with('/') {
path.push('/');
}
match order {
SortOrder::Desc => two.cmp(&one),
SortOrder::Asc => one.cmp(&two),
}
});
})+
}
}};
}

let stream =
sd_indexer::ephemeral(service, rules, &path)
.await
.map_err(|err| {
rspc::Error::new(ErrorCode::InternalServerError, err.to_string())
})?;
if let Some(order) = order {
order_match!(
order,
[
(Name, |p| p.name().to_lowercase()),
(SizeInBytes, |p| p.size_in_bytes()),
(DateCreated, |p| p.date_created()),
(DateModified, |p| p.date_modified())
]
)
}
})
.await?;

let mut stream = BatchedStream::new(stream);
let mut stream = BatchedStream::new(paths);
Ok(unsafe_streamed_query(stream! {
let mut to_generate = vec![];

while let Some(result) = stream.next().await {
// We optimize for the case of no errors because it should be way more common.
let mut entries = Vec::with_capacity(result.len());
let mut errors = Vec::with_capacity(0);

// For this batch we check if any directories are actually locations, so the UI can link directly to them
let locations = library
.db
.location()
.find_many(vec![location::path::in_vec(
result.iter().filter_map(|e| match e {
Ok(e) if ObjectKind::from_i32(e.kind) == ObjectKind::Folder => Some(e.path.clone()),
_ => None
}).collect::<Vec<_>>()
)])
.exec()
.await
.and_then(|l| {
Ok(l.into_iter()
.filter_map(|item| item.path.clone().map(|l| (l, item)))
.collect::<HashMap<_, _>>())
})
.map_err(|err| error!("Looking up locations failed: {err:?}"))
.unwrap_or_default();

for item in result {
match item {
Ok(item) => {
let kind = ObjectKind::from_i32(item.kind);
let should_generate_thumbnail = {
#[cfg(feature = "ffmpeg")]
{
matches!(
kind,
ObjectKind::Image | ObjectKind::Video | ObjectKind::Document
)
}

#[cfg(not(feature = "ffmpeg"))]
{
matches!(kind, ObjectKind::Image | ObjectKind::Document)
}
};

// TODO: This requires all paths to be loaded before thumbnailing starts.
// TODO: This copies the existing functionality but will not fly with Cloud locations (as loading paths will be *way* slower)
// TODO: https://linear.app/spacedriveapp/issue/ENG-1719/cloud-thumbnailer
let thumbnail = if should_generate_thumbnail {
if from == PathFrom::Path {
let size = u64::from_be_bytes((&*item.size_in_bytes_bytes).try_into().expect("Invalid size"));
if let Ok(cas_id) = generate_cas_id(&item.path, size).await.map_err(|err| error!("Error generating cas id for '{:?}': {err:?}", item.path)) {
if ObjectKind::from_i32(item.kind) == ObjectKind::Document {
to_generate.push(GenerateThumbnailArgs::new(
item.extension.clone(),
cas_id.clone(),
PathBuf::from(&item.path),
));
} else {
to_generate.push(GenerateThumbnailArgs::new(
item.extension.clone(),
cas_id.clone(),
PathBuf::from(&item.path),
));
}

Some(get_ephemeral_thumb_key(&cas_id))
} else {
None
}
} else {
warn!("Thumbnailer not supported for cloud locations");
None
}
} else {
None
};

entries.push(if let Some(item) = locations.get(&item.path) {
ExplorerItem::Location {
item: item.clone(),
}
} else {
ExplorerItem::NonIndexedPath {
thumbnail,
item,
}
});
Ok(item) => entries.push(item),
Err(e) => match e {
Either::Left(e) => errors.push(e),
Either::Right(e) => errors.push(e.into()),
},
Err(e) => errors.push(e.to_string()),
}
}

@ -244,16 +162,6 @@ pub fn mount() -> AlphaRouter<Ctx> {
nodes,
};
}

if to_generate.len() > 0 {
node.thumbnailer
.new_ephemeral_thumbnails_batch(BatchToProcess::new(
to_generate,
false,
false,
))
.await;
}
}))
},
)
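For a single variant, the `order_match!` macro used above expands to roughly the following (an illustrative hand-expansion referencing the types from this diff, not compiler output):

match order {
    EphemeralPathOrder::Name(order) => {
        entries.sort_unstable_by(|path1, path2| {
            let func = |p: &non_indexed::Entry| p.name().to_lowercase();
            let one = func(path1);
            let two = func(path2);
            match order {
                SortOrder::Desc => two.cmp(&one),
                SortOrder::Asc => one.cmp(&two),
            }
        });
    }
    // ...one identical arm per variant: SizeInBytes, DateCreated, DateModified
}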
@ -289,7 +197,9 @@ pub fn mount() -> AlphaRouter<Ctx> {
let params = {
let (mut fp, obj) = merge_filters(filters, db).await?;

fp.push(prisma::file_path::object::is(obj));
if !obj.is_empty() {
fp.push(prisma::file_path::object::is(obj));
}

fp
};
@ -319,7 +229,7 @@ pub fn mount() -> AlphaRouter<Ctx> {
let mut items = Vec::with_capacity(file_paths.len());

for file_path in file_paths {
let thumbnail_exists_locally = if let Some(cas_id) = &file_path.cas_id {
let has_created_thumbnail = if let Some(cas_id) = &file_path.cas_id {
library
.thumbnail_exists(&node, cas_id)
.await
@ -332,8 +242,9 @@ pub fn mount() -> AlphaRouter<Ctx> {
thumbnail: file_path
.cas_id
.as_ref()
.filter(|_| thumbnail_exists_locally)
// .filter(|_| thumbnail_exists_locally)
.map(|i| get_indexed_thumb_key(i, library.id)),
has_created_thumbnail,
item: file_path,
})
}
@ -366,7 +277,9 @@ pub fn mount() -> AlphaRouter<Ctx> {
.count({
let (mut fp, obj) = merge_filters(filters, db).await?;

fp.push(prisma::file_path::object::is(obj));
if !obj.is_empty() {
fp.push(prisma::file_path::object::is(obj));
}

fp
})
@ -401,7 +314,9 @@ pub fn mount() -> AlphaRouter<Ctx> {
.find_many({
let (fp, mut obj) = merge_filters(filters, db).await?;

obj.push(prisma::object::file_paths::some(fp));
if !fp.is_empty() {
obj.push(prisma::object::file_paths::some(fp));
}

obj
})
@ -434,7 +349,7 @@ pub fn mount() -> AlphaRouter<Ctx> {
.map(|fp| fp.cas_id.as_ref())
.find_map(|c| c);

let thumbnail_exists_locally = if let Some(cas_id) = cas_id {
let has_created_thumbnail = if let Some(cas_id) = cas_id {
library.thumbnail_exists(&node, cas_id).await.map_err(|e| {
rspc::Error::with_cause(
ErrorCode::InternalServerError,
@ -448,9 +363,10 @@ pub fn mount() -> AlphaRouter<Ctx> {

items.push(ExplorerItem::Object {
thumbnail: cas_id
.filter(|_| thumbnail_exists_locally)
// .filter(|_| thumbnail_exists_locally)
.map(|cas_id| get_indexed_thumb_key(cas_id, library.id)),
item: object,
has_created_thumbnail,
});
}

@ -482,7 +398,9 @@ pub fn mount() -> AlphaRouter<Ctx> {
.count({
let (fp, mut obj) = merge_filters(filters, db).await?;

obj.push(prisma::object::file_paths::some(fp));
if !fp.is_empty() {
obj.push(prisma::object::file_paths::some(fp));
}

obj
})
@ -1,3 +1,4 @@
use sd_sync::CompressedCRDTOperations;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
@ -68,7 +69,7 @@ pub async fn run_actor(
.send(sd_core_sync::Event::Messages(MessagesEvent {
instance_id: sync.instance,
has_more: ops.len() == OPS_PER_REQUEST as usize,
messages: ops,
messages: CompressedCRDTOperations::new(ops),
}))
.await
);
@ -64,7 +64,6 @@ pub struct Node {
pub event_bus: (broadcast::Sender<CoreEvent>, broadcast::Receiver<CoreEvent>),
pub notifications: Notifications,
pub thumbnailer: OldThumbnailer,
pub files_over_p2p_flag: Arc<AtomicBool>,
pub cloud_sync_flag: Arc<AtomicBool>,
pub env: Arc<env::Env>,
pub http: reqwest::Client,
@ -135,7 +134,6 @@ impl Node {
config,
event_bus,
libraries,
files_over_p2p_flag: Arc::new(AtomicBool::new(false)),
cloud_sync_flag: Arc::new(AtomicBool::new(false)),
http: reqwest::Client::new(),
env,
@ -14,7 +14,6 @@ use sd_core_file_path_helper::{
};
use sd_core_prisma_helpers::location_with_indexer_rules;

use sd_indexer::path::normalize_path;
use sd_prisma::{
prisma::{file_path, indexer_rules_in_location, location, PrismaClient},
prisma_sync,
@ -23,17 +22,18 @@ use sd_sync::*;
use sd_utils::{
db::{maybe_missing, MissingFieldError},
error::{FileIOError, NonUtf8PathError},
msgpack, uuid_to_bytes,
msgpack,
};

use std::{
collections::HashSet,
path::{Path, PathBuf},
path::{Component, Path, PathBuf},
sync::Arc,
};

use chrono::Utc;
use futures::future::TryFutureExt;
use normpath::PathExt;
use prisma_client_rust::{operator::and, or, QueryError};
use serde::{Deserialize, Serialize};
use serde_json::json;
@ -46,6 +46,7 @@ mod error;
pub mod indexer;
mod manager;
pub mod metadata;
pub mod non_indexed;

pub use error::LocationError;
use indexer::OldIndexerJobInit;
@ -653,6 +654,57 @@ pub struct CreatedLocationResult {
pub data: location_with_indexer_rules::Data,
}

pub(crate) fn normalize_path(path: impl AsRef<Path>) -> io::Result<(String, String)> {
let mut path = path.as_ref().to_path_buf();
let (location_path, normalized_path) = path
// Normalize path and also check if it exists
.normalize()
.and_then(|normalized_path| {
if cfg!(windows) {
// Use normalized path as main path on Windows
// This ensures we always receive a valid windows formatted path
// ex: /Users/JohnDoe/Downloads will become C:\Users\JohnDoe\Downloads
// Internally `normalize` calls `GetFullPathNameW` on Windows
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew
path = normalized_path.as_path().to_path_buf();
}

Ok((
// TODO: Maybe save the path bytes instead of the string representation to avoid depending on UTF-8
path.to_str().map(str::to_string).ok_or(io::Error::new(
io::ErrorKind::InvalidInput,
"Found non-UTF-8 path",
))?,
normalized_path,
))
})?;

// Not needed on Windows because the normalization already handles it
if cfg!(not(windows)) {
// Replace location_path with normalize_path, when the first one ends in `.` or `..`
// This is required so localize_name doesn't panic
if let Some(component) = path.components().next_back() {
if matches!(component, Component::CurDir | Component::ParentDir) {
path = normalized_path.as_path().to_path_buf();
}
}
}

// Use `to_string_lossy` because a partially corrupted but identifiable name is better than nothing
let mut name = path.localize_name().to_string_lossy().to_string();

// Windows doesn't have a root directory
if cfg!(not(windows)) && name == "/" {
name = "Root".to_string()
}

if name.replace(char::REPLACEMENT_CHARACTER, "") == "" {
name = "Unknown".to_string()
}

Ok((location_path, name))
}

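The `Component::CurDir | Component::ParentDir` check in `normalize_path` above guards against paths whose final component is `.` or `..` rather than a real file name. A std-only demonstration (note that `Path::components()` normalizes away a `.` that is not at the start of the path, so `..` and a leading `.` are the cases that actually survive):

use std::path::{Component, Path};

fn ends_in_dot_component(path: &Path) -> bool {
    matches!(
        path.components().next_back(),
        Some(Component::CurDir | Component::ParentDir)
    )
}

fn main() {
    assert!(ends_in_dot_component(Path::new("/tmp/..")));
    assert!(ends_in_dot_component(Path::new("."))); // CurDir is kept at the start
    assert!(!ends_in_dot_component(Path::new("/tmp/file.txt")));
}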
async fn create_location(
library @ Library { db, sync, .. }: &Library,
location_pub_id: Uuid,
core/src/location/non_indexed.rs (new file, 385 lines)
@ -0,0 +1,385 @@
use crate::{
api::locations::ExplorerItem,
library::Library,
object::{
cas::generate_cas_id,
media::old_thumbnail::{get_ephemeral_thumb_key, BatchToProcess, GenerateThumbnailArgs},
},
Node,
};

use sd_core_file_path_helper::{path_is_hidden, MetadataExt};
use sd_core_indexer_rules::{
seed::{no_hidden, no_os_protected},
IndexerRule, RuleKind,
};

use sd_file_ext::{extensions::Extension, kind::ObjectKind};
use sd_prisma::prisma::location;
use sd_utils::{chain_optional_iter, error::FileIOError};

use std::{
collections::HashMap,
io::ErrorKind,
path::{Path, PathBuf},
sync::Arc,
};

use chrono::{DateTime, Utc};
use futures::Stream;
use itertools::Either;
use rspc::ErrorCode;
use serde::Serialize;
use specta::Type;
use thiserror::Error;
use tokio::{io, sync::mpsc, task::JoinError};
use tokio_stream::wrappers::ReceiverStream;
use tracing::{error, span, warn, Level};

use super::normalize_path;

#[derive(Debug, Error)]
pub enum NonIndexedLocationError {
#[error("path not found: {}", .0.display())]
NotFound(PathBuf),

#[error(transparent)]
FileIO(#[from] FileIOError),

#[error("database error: {0}")]
Database(#[from] prisma_client_rust::QueryError),

#[error("error joining tokio task: {0}")]
TaskJoinError(#[from] JoinError),

#[error("receiver shutdown error")]
SendError,
}

impl<T> From<mpsc::error::SendError<T>> for NonIndexedLocationError {
fn from(_: mpsc::error::SendError<T>) -> Self {
Self::SendError
}
}

impl From<NonIndexedLocationError> for rspc::Error {
fn from(err: NonIndexedLocationError) -> Self {
match err {
NonIndexedLocationError::NotFound(_) => {
rspc::Error::with_cause(ErrorCode::NotFound, err.to_string(), err)
}
_ => rspc::Error::with_cause(ErrorCode::InternalServerError, err.to_string(), err),
}
}
}

impl<P: AsRef<Path>> From<(P, io::Error)> for NonIndexedLocationError {
fn from((path, source): (P, io::Error)) -> Self {
if source.kind() == io::ErrorKind::NotFound {
Self::NotFound(path.as_ref().into())
} else {
Self::FileIO(FileIOError::from((path, source)))
}
}
}

#[derive(Serialize, Type, Debug)]
pub struct NonIndexedPathItem {
pub path: String,
pub name: String,
pub extension: String,
pub kind: i32,
pub is_dir: bool,
pub date_created: DateTime<Utc>,
pub date_modified: DateTime<Utc>,
pub size_in_bytes_bytes: Vec<u8>,
pub hidden: bool,
}

// #[instrument(name = "non_indexed::walk", skip(sort_fn))]
pub async fn walk(
path: PathBuf,
with_hidden_files: bool,
node: Arc<Node>,
library: Arc<Library>,
sort_fn: impl FnOnce(&mut Vec<Entry>) + Send,
) -> Result<
impl Stream<Item = Result<ExplorerItem, Either<rspc::Error, NonIndexedLocationError>>> + Send,
NonIndexedLocationError,
> {
let mut entries = get_all_entries(path.clone()).await?;

{
let span = span!(Level::INFO, "sort_fn");
let _enter = span.enter();

sort_fn(&mut entries);
}

let (tx, rx) = mpsc::channel(128);
let tx2 = tx.clone();

// We wanna process and let the caller use the stream.
let task = tokio::spawn(async move {
let path = &path;
let rules = chain_optional_iter(
[IndexerRule::from(no_os_protected())],
[(!with_hidden_files).then(|| IndexerRule::from(no_hidden()))],
);

let mut thumbnails_to_generate = vec![];
// Generating thumbnails for PDFs is kinda slow, so we're leaving them for last in the batch
let mut document_thumbnails_to_generate = vec![];
let mut directories = vec![];

for entry in entries.into_iter() {
let (entry_path, name) = match normalize_path(entry.path) {
Ok(v) => v,
Err(e) => {
tx.send(Err(Either::Left(
NonIndexedLocationError::from((path, e)).into(),
)))
.await?;
continue;
}
};

match IndexerRule::apply_all(&rules, &entry_path).await {
Ok(rule_results) => {
// No OS Protected and No Hidden rules, must always be from this kind, should panic otherwise
if rule_results[&RuleKind::RejectFilesByGlob]
.iter()
.any(|reject| !reject)
{
continue;
}
}
Err(e) => {
tx.send(Err(Either::Left(e.into()))).await?;
continue;
}
};

if entry.metadata.is_dir() {
directories.push((entry_path, name, entry.metadata));
} else {
let path = Path::new(&entry_path);

let Some(name) = path
.file_stem()
.and_then(|s| s.to_str().map(str::to_string))
else {
warn!("Failed to extract name from path: {}", &entry_path);
continue;
};

let extension = path
.extension()
.and_then(|s| s.to_str().map(str::to_string))
.unwrap_or_default();

let kind = Extension::resolve_conflicting(&path, false)
.await
.map(Into::into)
.unwrap_or(ObjectKind::Unknown);

let should_generate_thumbnail = {
#[cfg(feature = "ffmpeg")]
{
matches!(
kind,
ObjectKind::Image | ObjectKind::Video | ObjectKind::Document
)
}

#[cfg(not(feature = "ffmpeg"))]
{
matches!(kind, ObjectKind::Image | ObjectKind::Document)
}
};

let thumbnail_key = if should_generate_thumbnail {
if let Ok(cas_id) =
generate_cas_id(&path, entry.metadata.len())
.await
.map_err(|e| {
tx.send(Err(Either::Left(
NonIndexedLocationError::from((path, e)).into(),
)))
}) {
if kind == ObjectKind::Document {
document_thumbnails_to_generate.push(GenerateThumbnailArgs::new(
extension.clone(),
cas_id.clone(),
path.to_path_buf(),
));
} else {
thumbnails_to_generate.push(GenerateThumbnailArgs::new(
extension.clone(),
cas_id.clone(),
path.to_path_buf(),
));
}

Some(get_ephemeral_thumb_key(&cas_id))
} else {
None
}
} else {
None
};

tx.send(Ok(ExplorerItem::NonIndexedPath {
thumbnail: thumbnail_key,
item: NonIndexedPathItem {
hidden: path_is_hidden(Path::new(&entry_path), &entry.metadata),
path: entry_path,
name,
extension,
kind: kind as i32,
is_dir: false,
date_created: entry.metadata.created_or_now().into(),
date_modified: entry.metadata.modified_or_now().into(),
size_in_bytes_bytes: entry.metadata.len().to_be_bytes().to_vec(),
},
has_created_thumbnail: false,
}))
.await?;
}
}

thumbnails_to_generate.extend(document_thumbnails_to_generate);

node.thumbnailer
.new_ephemeral_thumbnails_batch(BatchToProcess::new(
thumbnails_to_generate,
false,
false,
))
.await;

let mut locations = library
.db
.location()
.find_many(vec![location::path::in_vec(
directories
.iter()
.map(|(path, _, _)| path.clone())
.collect(),
)])
.exec()
.await?
.into_iter()
.flat_map(|location| {
location
.path
.clone()
.map(|location_path| (location_path, location))
})
.collect::<HashMap<_, _>>();

for (directory, name, metadata) in directories {
if let Some(location) = locations.remove(&directory) {
tx.send(Ok(ExplorerItem::Location { item: location }))
.await?;
} else {
tx.send(Ok(ExplorerItem::NonIndexedPath {
thumbnail: None,
item: NonIndexedPathItem {
hidden: path_is_hidden(Path::new(&directory), &metadata),
path: directory,
name,
extension: String::new(),
kind: ObjectKind::Folder as i32,
is_dir: true,
date_created: metadata.created_or_now().into(),
date_modified: metadata.modified_or_now().into(),
size_in_bytes_bytes: metadata.len().to_be_bytes().to_vec(),
},
has_created_thumbnail: false,
}))
.await?;
}
}

Ok::<_, NonIndexedLocationError>(())
});

tokio::spawn(async move {
match task.await {
Ok(Ok(())) => {}
Ok(Err(e)) => {
let _ = tx2.send(Err(Either::Left(e.into()))).await;
}
Err(e) => error!("error joining tokio task: {}", e),
}
});

Ok(ReceiverStream::new(rx))
}

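`walk` above hands the caller a `ReceiverStream` while a spawned task keeps producing into the channel, so results stream out as they are computed. The skeleton of that pattern, reduced to its moving parts:

use futures::StreamExt;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(128);

    // Producer: runs concurrently with the consumer below.
    tokio::spawn(async move {
        for i in 0..3 {
            // send() fails once the receiver is dropped; stop producing then.
            if tx.send(i).await.is_err() {
                break;
            }
        }
    });

    // Consumer: the channel receiver, wrapped as a Stream.
    let mut stream = ReceiverStream::new(rx);
    while let Some(item) = stream.next().await {
        println!("{item}");
    }
}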
#[derive(Debug)]
pub struct Entry {
path: PathBuf,
name: String,
// size_in_bytes: u64,
// date_created:
metadata: std::fs::Metadata,
}

impl Entry {
pub fn name(&self) -> &str {
&self.name
}

pub fn size_in_bytes(&self) -> u64 {
self.metadata.len()
}

pub fn date_created(&self) -> DateTime<Utc> {
self.metadata.created_or_now().into()
}

pub fn date_modified(&self) -> DateTime<Utc> {
self.metadata.modified_or_now().into()
}
}

/// We get all of the FS entries first before we start processing on each of them.
///
/// From my M1 Macbook Pro this:
/// - takes 11ms per 10 000 files
/// and
/// - consumes 0.16MB of RAM per 10 000 entries.
///
/// The reason we collect these all up is so we can apply ordering, and then begin streaming the data as it's processed to the frontend.
// #[instrument(name = "get_all_entries")]
pub async fn get_all_entries(path: PathBuf) -> Result<Vec<Entry>, NonIndexedLocationError> {
tokio::task::spawn_blocking(move || {
let path = &path;
let dir = std::fs::read_dir(path).map_err(|e| (path, e))?;
let mut entries = Vec::new();
for entry in dir {
let entry = entry.map_err(|e| (path, e))?;

// We must not keep `entry` around as we will quickly hit the OS limit on open file descriptors
entries.push(Entry {
path: entry.path(),
name: entry
.file_name()
.to_str()
.ok_or_else(|| {
(
path,
io::Error::new(ErrorKind::Other, "error non UTF-8 path"),
)
})?
.to_string(),
metadata: entry.metadata().map_err(|e| (path, e))?,
});
}

Ok(entries)
})
.await?
}
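`get_all_entries` wraps a synchronous `read_dir` scan in `spawn_blocking` so the async runtime's worker threads are never parked on filesystem syscalls. A minimal version of the same idea:

use std::path::PathBuf;

async fn list_dir(path: PathBuf) -> std::io::Result<Vec<PathBuf>> {
    tokio::task::spawn_blocking(move || {
        // Plain blocking std::fs work, moved onto the blocking thread pool.
        std::fs::read_dir(path)?
            .map(|entry| entry.map(|e| e.path()))
            .collect()
    })
    .await
    .expect("blocking task panicked")
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    for p in list_dir(PathBuf::from(".")).await? {
        println!("{}", p.display());
    }
    Ok(())
}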
@ -37,20 +37,64 @@ pub enum P2PDiscoveryState {
}

#[derive(Debug, Default, Clone, Copy, Serialize, Deserialize, Type)]
#[serde(rename_all = "snake_case", untagged)]
#[serde(rename_all = "snake_case", tag = "type", content = "value")]
pub enum Port {
Disabled,
#[default]
Random,
Discrete(u16),
}

impl Port {
pub fn get(&self) -> u16 {
match self {
Port::Random => 0,
Port::Discrete(port) => *port,
}
}

pub fn is_random(&self) -> bool {
matches!(self, Port::Random)
}
}

fn default_as_true() -> bool {
true
}

fn skip_if_true(value: &bool) -> bool {
*value
}

fn skip_if_false(value: &bool) -> bool {
!*value
}

#[derive(Debug, Clone, Serialize, Deserialize, Type)]
pub struct NodeConfigP2P {
#[serde(default)]
pub discovery: P2PDiscoveryState,
#[serde(default, skip_serializing_if = "Port::is_random")]
pub port: Port,
#[serde(default = "default_as_true", skip_serializing_if = "skip_if_true")]
pub ipv4: bool,
#[serde(default = "default_as_true", skip_serializing_if = "skip_if_true")]
pub ipv6: bool,
#[serde(default, skip_serializing_if = "skip_if_false")]
pub remote_access: bool,
}

impl Default for NodeConfigP2P {
fn default() -> Self {
Self {
discovery: P2PDiscoveryState::Everyone,
port: Port::Random,
ipv4: true,
ipv6: true,
remote_access: false,
}
}
}

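The `Port` change above swaps serde's `untagged` representation for an adjacently tagged one, which makes the JSON self-describing. A standalone check of what the new attribute produces (serde and serde_json assumed as dependencies):

use serde::Serialize;

#[derive(Serialize)]
#[serde(rename_all = "snake_case", tag = "type", content = "value")]
enum Port {
    Random,
    Discrete(u16),
}

fn main() {
    // Unit variants serialize as just the tag...
    assert_eq!(
        serde_json::to_string(&Port::Random).unwrap(),
        r#"{"type":"random"}"#
    );
    // ...and newtype variants carry their payload under "value".
    assert_eq!(
        serde_json::to_string(&Port::Discrete(7373)).unwrap(),
        r#"{"type":"discrete","value":7373}"#
    );
}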
/// NodeConfig is the configuration for a node. This is shared between all libraries and is stored in a JSON file on disk.
#[derive(Debug, Clone, Serialize, Deserialize)] // If you are adding `specta::Type` on this your probably about to leak the P2P private key
pub struct NodeConfig {
@ -66,12 +110,8 @@ pub struct NodeConfig {
#[serde(with = "identity_serde")]
pub identity: Identity,
/// P2P config
#[serde(default, skip_serializing_if = "Port::is_random")]
pub p2p_ipv4_port: Port,
#[serde(default, skip_serializing_if = "Port::is_random")]
pub p2p_ipv6_port: Port,
#[serde(default)]
pub p2p_discovery: P2PDiscoveryState,
pub p2p: NodeConfigP2P,
/// Feature flags enabled on the node
#[serde(default)]
pub features: Vec<BackendFeature>,
@ -153,9 +193,7 @@ impl ManagedVersion<NodeConfigVersion> for NodeConfig {
id: Uuid::new_v4(),
name,
identity: Identity::default(),
p2p_ipv4_port: Port::Random,
p2p_ipv6_port: Port::Random,
p2p_discovery: P2PDiscoveryState::Everyone,
p2p: NodeConfigP2P::default(),
version: Self::LATEST_VERSION,
features: vec![],
notifications: vec![],
@ -373,7 +373,12 @@ pub(super) async fn generate_thumbnail(
}
}
}

// This if is REALLY needed, due to the sheer performance of the thumbnailer,
// I restricted to only send events notifying for thumbnails in the current
// opened directory, sending events for the entire location turns into a
// humongous bottleneck in the frontend lol, since it doesn't even knows
// what to do with thumbnails for inner directories lol
// - fogodev
if !in_background {
trace!("Emitting new thumbnail event");
if reporter
@ -1,4 +1,5 @@
use crate::{
api::CoreEvent,
library::Library,
location::ScanState,
old_job::{
@ -226,6 +227,11 @@ impl StatefulJob for OldFileIdentifierJobInit {
new_metadata.total_objects_linked = total_objects_linked;
new_metadata.cursor = new_cursor;

// send an array of ids to let clients know new objects were identified
ctx.node.emit(CoreEvent::NewIdentifiedObjects {
file_path_ids: file_paths.iter().map(|fp| fp.id).collect(),
});

ctx.progress(vec![
JobReportUpdate::CompletedTaskCount(step_number * CHUNK_SIZE + file_paths.len()),
JobReportUpdate::Message(format!(
@ -1,6 +1,6 @@
use crate::{
node::{
config::{self, P2PDiscoveryState, Port},
config::{self, P2PDiscoveryState},
get_hardware_model_name, HardwareModel,
},
p2p::{
@ -14,17 +14,14 @@ use axum::routing::IntoMakeService;

use sd_p2p::{
flume::{bounded, Receiver},
HookId, Libp2pPeerId, Listener, Mdns, Peer, QuicTransport, RelayServerEntry, RemoteIdentity,
HookId, Libp2pPeerId, Mdns, Peer, QuicTransport, RelayServerEntry, RemoteIdentity,
UnicastStream, P2P,
};
use sd_p2p_tunnel::Tunnel;
use serde::Serialize;
use serde_json::json;
use specta::Type;
use std::{
collections::{HashMap, HashSet},
collections::HashMap,
convert::Infallible,
net::SocketAddr,
sync::{atomic::AtomicBool, Arc, Mutex, PoisonError},
time::Duration,
};
@ -37,6 +34,12 @@ use uuid::Uuid;

use super::{P2PEvents, PeerMetadata};

#[derive(Default)]
pub struct ListenerErrors {
pub ipv4: Option<String>,
pub ipv6: Option<String>,
}

pub struct P2PManager {
pub(crate) p2p: Arc<P2P>,
mdns: Mutex<Option<Mdns>>,
@ -48,6 +51,7 @@ pub struct P2PManager {
pub(super) spacedrop_cancellations: Arc<Mutex<HashMap<Uuid, Arc<AtomicBool>>>>,
pub(crate) node_config: Arc<config::Manager>,
pub libraries_hook_id: HookId,
pub listener_errors: Mutex<ListenerErrors>,
}

impl P2PManager {
@ -75,6 +79,7 @@ impl P2PManager {
spacedrop_cancellations: Default::default(),
node_config,
libraries_hook_id,
listener_errors: Default::default(),
});
this.on_node_config_change().await;

@ -141,35 +146,45 @@ impl P2PManager {
}
.update(&mut self.p2p.metadata_mut());

let port = match config.p2p_ipv4_port {
Port::Disabled => None,
Port::Random => Some(0),
Port::Discrete(port) => Some(port),
};
info!("Setting quic ipv4 listener to: {port:?}");
if let Err(err) = self.quic.set_ipv4_enabled(port).await {
let port = config.p2p.port.get();

info!(
"Setting quic ipv4 listener to: {:?}",
config.p2p.ipv4.then_some(port)
);
if let Err(err) = self
.quic
.set_ipv4_enabled(config.p2p.ipv4.then_some(port))
.await
{
error!("Failed to enabled quic ipv4 listener: {err}");
self.node_config
.write(|c| c.p2p_ipv4_port = Port::Disabled)
.await
.ok();
self.node_config.write(|c| c.p2p.ipv4 = false).await.ok();

self.listener_errors
.lock()
.unwrap_or_else(PoisonError::into_inner)
.ipv4 = Some(err.to_string());
}

let port = match config.p2p_ipv6_port {
Port::Disabled => None,
Port::Random => Some(0),
Port::Discrete(port) => Some(port),
};
info!("Setting quic ipv4 listener to: {port:?}");
if let Err(err) = self.quic.set_ipv6_enabled(port).await {
info!(
"Setting quic ipv6 listener to: {:?}",
config.p2p.ipv6.then_some(port)
);
if let Err(err) = self
.quic
.set_ipv6_enabled(config.p2p.ipv6.then_some(port))
.await
{
error!("Failed to enabled quic ipv6 listener: {err}");
self.node_config
.write(|c| c.p2p_ipv6_port = Port::Disabled)
.await
.ok();
self.node_config.write(|c| c.p2p.ipv6 = false).await.ok();

self.listener_errors
.lock()
.unwrap_or_else(PoisonError::into_inner)
.ipv6 = Some(err.to_string());
}

let should_revert = match config.p2p_discovery {
let should_revert = match config.p2p.discovery {
P2PDiscoveryState::Everyone
// TODO: Make `ContactsOnly` work
| P2PDiscoveryState::ContactsOnly => {
@ -210,7 +225,7 @@ impl P2PManager {
if should_revert {
let _ = self
.node_config
.write(|c| c.p2p_discovery = P2PDiscoveryState::Disabled)
.write(|c| c.p2p.discovery = P2PDiscoveryState::Disabled)
.await;
}
}
@ -255,11 +270,7 @@ impl P2PManager {
"name": name,
"listener_addrs": listeners.iter().find(|l| l.is_hook_id(*id)).map(|l| l.addrs.clone()),
})).collect::<Vec<_>>(),
"config": json!({
"p2p_ipv4_port": node_config.p2p_ipv4_port,
"p2p_ipv6_port": node_config.p2p_ipv6_port,
"p2p_discovery": node_config.p2p_discovery,
}),
"config": node_config.p2p,
"relay_config": self.quic.get_relay_config(),
})
}
@ -282,8 +293,6 @@ async fn start(
let mut service = unwrap_infallible(service.call(()).await);

tokio::spawn(async move {
println!("APPLICATION GOT STREAM: {:?}", stream); // TODO

let Ok(header) = Header::from_stream(&mut stream).await.map_err(|err| {
error!("Failed to read header from stream: {}", err);
}) else {
@ -337,7 +346,8 @@ async fn start(
}
Header::Http => {
let remote = stream.remote_identity();
let Err(err) = operations::rspc::receiver(stream, &mut service).await else {
let Err(err) = operations::rspc::receiver(stream, &mut service, &node).await
else {
return;
};

@ -350,23 +360,6 @@ async fn start(
Ok::<_, ()>(())
}

#[derive(Debug, Serialize, Type)]
pub struct Listener2 {
pub id: String,
pub name: &'static str,
pub addrs: HashSet<SocketAddr>,
}

pub fn into_listener2(l: &[Listener]) -> Vec<Listener2> {
l.iter()
.map(|l| Listener2 {
id: format!("{:?}", l.id),
name: l.name,
addrs: l.addrs.clone(),
})
.collect()
}

fn unwrap_infallible<T>(result: Result<T, Infallible>) -> T {
match result {
Ok(value) => value,
@ -6,7 +6,7 @@ use sd_p2p::{RemoteIdentity, UnicastStream, P2P};
use tokio::io::AsyncWriteExt;
use tracing::debug;

use crate::p2p::Header;
use crate::{p2p::Header, Node};

/// Transfer an rspc query to a remote node.
#[allow(unused)]
@ -37,6 +37,7 @@ pub async fn remote_rspc(
pub(crate) async fn receiver(
stream: UnicastStream,
service: &mut Router,
node: &Node,
) -> Result<(), Box<dyn Error>> {
debug!(
"Received http request from peer '{}'",
@ -45,8 +46,8 @@ pub(crate) async fn receiver(

// TODO: Authentication
#[allow(clippy::todo)]
if true {
todo!("You wouldn't download a car!");
if node.config.get().await.p2p.remote_access {
todo!("No way buddy!");
}

Http::new()
@ -85,7 +85,7 @@ pub async fn spacedrop(
debug!("({id}): connected, sending header");
let header = Header::Spacedrop(SpaceblockRequests {
id,
block_size: BlockSize::from_size(total_length),
block_size: BlockSize::from_file_size(total_length),
requests,
});
if let Err(err) = stream.write_all(&header.to_bytes()).await {
@@ -6,7 +6,7 @@ use crate::{
};

use sd_p2p_proto::{decode, encode};
use sd_sync::CRDTOperation;
use sd_sync::CompressedCRDTOperations;

use std::sync::Arc;

@@ -28,10 +28,11 @@ mod originator {
	use sd_p2p_tunnel::Tunnel;

	pub mod tx {

		use super::*;

		#[derive(Debug, PartialEq)]
		pub struct Operations(pub Vec<CRDTOperation>);
		pub struct Operations(pub CompressedCRDTOperations);

		impl Operations {
			// TODO: Per field errors for better error handling

@@ -56,8 +57,10 @@ mod originator {
	#[cfg(test)]
	#[tokio::test]
	async fn test() {
		use sd_sync::CRDTOperation;

		{
			let original = Operations(vec![]);
			let original = Operations(CompressedCRDTOperations::new(vec![]));

			let mut cursor = std::io::Cursor::new(original.to_bytes());
			let result = Operations::from_stream(&mut cursor).await.unwrap();

@@ -65,13 +68,13 @@ mod originator {
		}

		{
			let original = Operations(vec![CRDTOperation {
			let original = Operations(CompressedCRDTOperations::new(vec![CRDTOperation {
				instance: Uuid::new_v4(),
				timestamp: sync::NTP64(0),
				record_id: rmpv::Value::Nil,
				model: 0,
				data: sd_sync::CRDTOperationData::create(),
			}]);
			}]));

			let mut cursor = std::io::Cursor::new(original.to_bytes());
			let result = Operations::from_stream(&mut cursor).await.unwrap();

@@ -115,7 +118,7 @@ mod originator {
		let ops = sync.get_ops(args).await.unwrap();

		tunnel
			.write_all(&tx::Operations(ops).to_bytes())
			.write_all(&tx::Operations(CompressedCRDTOperations::new(ops)).to_bytes())
			.await
			.unwrap();
		tunnel.flush().await.unwrap();

@@ -31,7 +31,7 @@ impl<S: Stream> BatchedStream<S> {
	}
}

impl<S: Stream> Stream for BatchedStream<S> {
impl<S: Stream + Unpin> Stream for BatchedStream<S> {
	type Item = Vec<S::Item>;

	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {

@@ -1,8 +1,9 @@
use std::pin::pin;

use async_stream::stream;
use futures::{Stream, StreamExt};
use serde::Serialize;
use specta::{reference::Reference, DataType, Type, TypeMap};
use sync_wrapper::SyncStream;

#[derive(Serialize)]
#[serde(untagged)]

@@ -26,18 +27,13 @@ impl<T: Type> Type for Output<T> {
}

// Marked as unsafe as the types are a lie and this should always be used with `useUnsafeStreamedQuery`
pub fn unsafe_streamed_query<S: Stream + Send>(
	stream: S,
) -> impl Stream<Item = Output<S::Item>> + Send + Sync
where
	S::Item: Send,
{
	SyncStream::new(stream! {
		let mut stream = std::pin::pin!(stream);
pub fn unsafe_streamed_query<S: Stream>(stream: S) -> impl Stream<Item = Output<S::Item>> {
	stream! {
		let mut stream = pin!(stream);
		while let Some(v) = stream.next().await {
			yield Output::Data(v);
		}

		yield Output::Complete { __stream_complete: () };
	})
	}
}

@@ -33,14 +33,11 @@ impl KeyringInterface for SecretServiceKeyring {
	}

	fn contains_key(&self, id: &Identifier) -> bool {
		self.get_collection()
			.ok()
			.map(|k| {
				k.search_items(id.as_sec_ser_identifier())
					.ok()
					.map_or(false, |x| !x.is_empty())
			})
			.unwrap_or_default()
		self.get_collection().ok().is_some_and(|k| {
			k.search_items(id.as_sec_ser_identifier())
				.ok()
				.map_or(false, |x| !x.is_empty())
		})
	}

	fn get(&self, id: &Identifier) -> Result<Protected<Vec<u8>>> {

@@ -60,38 +60,3 @@ pub enum ObjectKind {
	/// Label
	Label = 26,
}

impl ObjectKind {
	pub fn from_i32(value: i32) -> Self {
		match value {
			0 => ObjectKind::Unknown,
			1 => ObjectKind::Document,
			2 => ObjectKind::Folder,
			3 => ObjectKind::Text,
			4 => ObjectKind::Package,
			5 => ObjectKind::Image,
			6 => ObjectKind::Audio,
			7 => ObjectKind::Video,
			8 => ObjectKind::Archive,
			9 => ObjectKind::Executable,
			10 => ObjectKind::Alias,
			11 => ObjectKind::Encrypted,
			12 => ObjectKind::Key,
			13 => ObjectKind::Link,
			14 => ObjectKind::WebPageArchive,
			15 => ObjectKind::Widget,
			16 => ObjectKind::Album,
			17 => ObjectKind::Collection,
			18 => ObjectKind::Font,
			19 => ObjectKind::Mesh,
			20 => ObjectKind::Code,
			21 => ObjectKind::Database,
			22 => ObjectKind::Book,
			23 => ObjectKind::Config,
			24 => ObjectKind::Dotfile,
			25 => ObjectKind::Screenshot,
			26 => ObjectKind::Label,
			_ => ObjectKind::Unknown,
		}
	}
}

@@ -58,8 +58,6 @@ impl<'a> Block<'a> {
mod tests {
	use std::io::Cursor;

	use crate::BlockSize;

	use super::*;

	#[tokio::test]

@@ -1,39 +1,97 @@
#![allow(non_upper_case_globals)]

use std::io;

use tokio::io::{AsyncRead, AsyncReadExt};

/// TODO
const KiB: u32 = 1024;
const MiB: u32 = 1024 * KiB;
const GiB: u32 = 1024 * MiB;

/// defines the size of each chunk of data that is sent
///
/// We store this in an enum so it's super efficient.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BlockSize(u32); // Max block size is gonna be 3.9GB which is stupidly overkill
pub enum BlockSize {
	_128KiB,
	_256KiB,
	_512KiB,
	_1MiB,
	_2MiB,
	_4MiB,
	_8MiB,
	_16MiB,
}

impl BlockSize {
	// TODO: Validating `BlockSize` are multiples of 2, I think. Idk why but BEP does it.

	pub async fn from_stream(stream: &mut (impl AsyncRead + Unpin)) -> io::Result<Self> {
		stream.read_u32_le().await.map(Self)
	}

	/// Determine the optimal block size for a given file size
	#[must_use]
	pub fn to_bytes(&self) -> [u8; 4] {
		self.0.to_le_bytes()
	}

	#[must_use]
	pub fn from_size(size: u64) -> Self {
		// TODO: Something like: https://docs.syncthing.net/specs/bep-v1.html#selection-of-block-size
		Self(131_072) // 128 KiB
	}

	/// This is super dangerous as it doesn't enforce any assumptions of the protocol and is designed just for tests.
	#[cfg(test)]
	#[must_use]
	pub fn dangerously_new(size: u32) -> Self {
		Self(size)
	pub fn from_file_size(size: u64) -> Self {
		// Values directly copied from https://docs.syncthing.net/specs/bep-v1.html#selection-of-block-size
		if size < 250 * u64::from(MiB) {
			return Self::_128KiB;
		} else if size < 500 * u64::from(MiB) {
			return Self::_256KiB;
		} else if size < u64::from(GiB) {
			return Self::_512KiB;
		} else if size < 2 * u64::from(GiB) {
			return Self::_1MiB;
		} else if size < 4 * u64::from(GiB) {
			return Self::_2MiB;
		} else if size < 8 * u64::from(GiB) {
			return Self::_4MiB;
		} else if size < 16 * u64::from(GiB) {
			return Self::_8MiB;
		}
		Self::_16MiB
	}

	/// Get the size of the block in bytes
	#[must_use]
	pub fn size(&self) -> u32 {
		self.0
		match self {
			Self::_128KiB => 128 * KiB,
			Self::_256KiB => 256 * KiB,
			Self::_512KiB => 512 * KiB,
			Self::_1MiB => MiB,
			Self::_2MiB => 2 * MiB,
			Self::_4MiB => 4 * MiB,
			Self::_8MiB => 8 * MiB,
			Self::_16MiB => 16 * MiB,
		}
	}

	pub async fn from_stream(stream: &mut (impl AsyncRead + Unpin)) -> io::Result<Self> {
		// WARNING: Be careful modifying this cause it may break backwards/forwards-compatibility
		match stream.read_u8().await? {
			0 => Ok(Self::_128KiB),
			1 => Ok(Self::_256KiB),
			2 => Ok(Self::_512KiB),
			3 => Ok(Self::_1MiB),
			4 => Ok(Self::_2MiB),
			5 => Ok(Self::_4MiB),
			6 => Ok(Self::_8MiB),
			7 => Ok(Self::_16MiB),
			_ => Err(io::Error::new(
				io::ErrorKind::InvalidData,
				"Invalid block size",
			)),
		}
	}

	#[must_use]
	pub fn to_bytes(&self) -> [u8; 1] {
		// WARNING: Be careful modifying this cause it may break backwards/forwards-compatibility
		[match self {
			Self::_128KiB => 0,
			Self::_256KiB => 1,
			Self::_512KiB => 2,
			Self::_1MiB => 3,
			Self::_2MiB => 4,
			Self::_4MiB => 5,
			Self::_8MiB => 6,
			Self::_16MiB => 7,
		}]
	}
}

@@ -45,7 +103,14 @@ mod tests {

	#[tokio::test]
	async fn test_block_size() {
		let req = BlockSize::dangerously_new(5);
		let req = BlockSize::_128KiB;
		let bytes = req.to_bytes();
		let req2 = BlockSize::from_stream(&mut Cursor::new(bytes))
			.await
			.unwrap();
		assert_eq!(req, req2);

		let req = BlockSize::_16MiB;
		let bytes = req.to_bytes();
		let req2 = BlockSize::from_stream(&mut Cursor::new(bytes))
			.await

@@ -1,32 +1,22 @@
//! TODO
// TODO: Clippy lints here

//! Spaceblock is a file transfer protocol that uses a block based system to transfer files.
//! This protocol is modelled after `SyncThing`'s BEP protocol. A huge thanks to its original authors!
//! A protocol for efficiently and securely transferring files between peers.
//!
//! Goals:
//! - Fast - Transfer files as quickly as possible
//! - Safe - Verify the files' integrity on both ends
//!
//! This protocol was heavily inspired by SyncThing's Block Exchange Protocol, although it's not compatible.
//! You can read more about it here: <https://docs.syncthing.net/specs/bep-v1.html>
#![allow(unused)] // TODO: This module is still in heavy development!
//!
#![warn(clippy::unwrap_used, clippy::panic)]

use std::{
	io,
	marker::PhantomData,
	path::{Path, PathBuf},
	string::FromUtf8Error,
	sync::{
		atomic::{AtomicBool, Ordering},
		Arc,
	},
	sync::atomic::{AtomicBool, Ordering},
};

use thiserror::Error;
use tokio::{
	fs::File,
	io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
};
use tokio::io::{AsyncBufRead, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tracing::debug;

use sd_p2p::UnicastStream;
use sd_p2p_proto::{decode, encode};

mod block;
mod block_size;
mod sb_request;

@@ -123,9 +113,8 @@ where
		); // SAFETY: Percent must be between 0 and 100

		if read == 0 {
			#[allow(clippy::panic)] // TODO: Remove panic
			// The file may have been modified during send on the sender and we don't account for that.
			// TODO: Error handling + send error to remote
			// The file may have been modified during send on the sender and we don't account for that.
			// TODO: Error handling + send error to remote
			assert!(
				(offset + read as u64) == self.reqs.requests[self.i].size,
				"File sending has stopped but it doesn't match the expected length!"

@@ -236,9 +225,9 @@ where

#[cfg(test)]
mod tests {
	use std::{io::Cursor, mem};
	use std::{io::Cursor, mem, sync::Arc};

	use tokio::sync::oneshot;
	use tokio::{io::BufReader, sync::oneshot};
	use uuid::Uuid;

	use super::*;

@@ -251,7 +240,7 @@ mod tests {
	let data = b"Spacedrive".to_vec();
	let req = SpaceblockRequests {
		id: Uuid::new_v4(),
		block_size: BlockSize::from_size(data.len() as u64),
		block_size: BlockSize::from_file_size(data.len() as u64),
		requests: vec![SpaceblockRequest {
			name: "Demo".to_string(),
			size: data.len() as u64,

@@ -287,9 +276,8 @@ mod tests {
	let (mut client, mut server) = tokio::io::duplex(64);

	// This is sent out of band of Spaceblock
	let block_size = 131_072_u32;
	let data = vec![0u8; block_size as usize * 4]; // Let's pacman some RAM
	let block_size = BlockSize::dangerously_new(block_size);
	let block_size = BlockSize::_128KiB;
	let data = vec![0u8; block_size.size() as usize * 4]; // Let's pacman some RAM

	let req = SpaceblockRequests {
		id: Uuid::new_v4(),

@@ -328,9 +316,8 @@ mod tests {
	let (mut client, mut server) = tokio::io::duplex(64);

	// This is sent out of band of Spaceblock
	let block_size = 25u32;
	let data = vec![0u8; block_size as usize];
	let block_size = BlockSize::dangerously_new(block_size); // TODO: Determine it using proper algo instead of hardcoding it
	let block_size = BlockSize::_128KiB;
	let data = vec![0u8; block_size.size() as usize];

	let req = SpaceblockRequests {
		id: Uuid::new_v4(),

@@ -370,9 +357,8 @@ mod tests {
	let (mut client, mut server) = tokio::io::duplex(64);

	// This is sent out of band of Spaceblock
	let block_size = 25u32;
	let data = vec![0u8; block_size as usize];
	let block_size = BlockSize::dangerously_new(block_size); // TODO: Determine it using proper algo instead of hardcoding it
	let block_size = BlockSize::_128KiB;
	let data = vec![0u8; block_size.size() as usize];

	let req = SpaceblockRequests {
		id: Uuid::new_v4(),

@@ -413,9 +399,8 @@ mod tests {
	let (mut client, mut server) = tokio::io::duplex(64);

	// This is sent out of band of Spaceblock
	let block_size = 25u32;
	let block_size = BlockSize::_128KiB;
	let data = vec![0u8; 0]; // Zero sized file
	let block_size = BlockSize::dangerously_new(block_size); // TODO: Determine it using proper algo instead of hardcoding it

	let req = SpaceblockRequests {
		id: Uuid::new_v4(),

@@ -88,7 +88,7 @@ impl SpaceblockRequests {
			.map_err(SpaceblockRequestsError::InvalidLen)?;

		let mut requests = Vec::new();
		for i in 0..size {
		for _i in 0..size {
			requests.push(SpaceblockRequest::from_stream(stream).await?);
		}

@@ -106,7 +106,6 @@ impl SpaceblockRequests {
			block_size,
			requests,
		} = self;
		#[allow(clippy::panic)] // TODO: Remove this panic
		assert!(
			requests.len() <= 255,
			"Can't Spacedrop more than 255 files at once!"

@@ -167,10 +166,9 @@ impl SpaceblockRequest {

	#[must_use]
	pub fn to_bytes(&self) -> Vec<u8> {
		let Self { name, size, range } = self;
		let mut buf = Vec::new();

		encode::string(&mut buf, name);
		encode::string(&mut buf, &self.name);
		buf.extend_from_slice(&self.size.to_le_bytes());
		buf.extend_from_slice(&self.range.to_bytes());
		buf

@@ -200,7 +198,7 @@ mod tests {
	async fn test_spaceblock_requests_empty() {
		let req = SpaceblockRequests {
			id: Uuid::new_v4(),
			block_size: BlockSize::from_size(42069),
			block_size: BlockSize::from_file_size(42069),
			requests: vec![],
		};

@@ -215,7 +213,7 @@ mod tests {
	async fn test_spaceblock_requests_one() {
		let req = SpaceblockRequests {
			id: Uuid::new_v4(),
			block_size: BlockSize::from_size(42069),
			block_size: BlockSize::from_file_size(42069),
			requests: vec![SpaceblockRequest {
				name: "Demo".to_string(),
				size: 42069,

@@ -246,7 +244,7 @@ mod tests {
	async fn test_spaceblock_requests_many() {
		let req = SpaceblockRequests {
			id: Uuid::new_v4(),
			block_size: BlockSize::from_size(42069),
			block_size: BlockSize::from_file_size(42069),
			requests: vec![
				SpaceblockRequest {
					name: "Demo".to_string(),

@@ -1,30 +0,0 @@
[package]
name = "sd-indexer"
version = "0.0.1"
license.workspace = true
edition.workspace = true
repository.workspace = true
publish = false

[dependencies]
sd-utils = { path = "../utils" }
sd-file-ext = { path = "../file-ext" }
sd-core-file-path-helper = { path = "../../core/crates/file-path-helper" }
sd-core-indexer-rules = { path = "../../core/crates/indexer-rules" }

chrono.workspace = true
futures-util = "0.3.30"
globset = { version = "0.4.14", features = ["serde1"] }
opendal = "0.45.1"
serde = { workspace = true, features = ["derive"] }
specta.workspace = true
thiserror.workspace = true
tracing.workspace = true
rmp-serde = "1.1.2"

# TODO: Remove these
rspc.workspace = true
tokio = { workspace = true, features = ["fs"] }
sd-prisma = { path = "../prisma" }
tempfile.workspace = true
normpath = { workspace = true, features = ["localization"] }

@@ -1,212 +0,0 @@
use std::{
	future::ready,
	io::{self, ErrorKind},
	path::PathBuf,
};

use chrono::{DateTime, Utc};
use futures_util::{Stream, StreamExt, TryFutureExt};
use opendal::{Operator, Scheme};
use sd_core_file_path_helper::path_is_hidden;
use sd_core_indexer_rules::{IndexerRule, RuleKind};
use sd_file_ext::{extensions::Extension, kind::ObjectKind};
use serde::Serialize;
use specta::Type;

use crate::stream::TaskStream;

#[derive(Serialize, Type, Debug)]
pub struct NonIndexedPathItem {
	pub path: String,
	pub name: String,
	pub extension: String,
	pub kind: i32, // TODO: Use `ObjectKind` instead
	// TODO: Use `kind` instead and drop this
	pub is_dir: bool,
	pub date_created: DateTime<Utc>,
	pub date_modified: DateTime<Utc>,
	pub size_in_bytes_bytes: Vec<u8>,
	pub hidden: bool,
}

pub async fn ephemeral(
	opendal: Operator,
	rules: Vec<IndexerRule>,
	path: &str,
) -> opendal::Result<impl Stream<Item = io::Result<NonIndexedPathItem>>> {
	let is_fs = opendal.info().scheme() == Scheme::Fs;
	let base_path = PathBuf::from(opendal.info().root());
	let mut lister = opendal.lister(path).await?;

	Ok(TaskStream::new(move |tx| async move {
		let rules = &*rules;
		while let Some(entry) = lister.next().await {
			let base_path = base_path.clone();
			let result = ready(entry)
				.map_err(|err| io::Error::new(ErrorKind::Other, format!("OpenDAL: {err:?}")))
				.and_then(|entry| async move {
					let path = base_path.join(entry.path());

					let extension = (!path.is_dir())
						.then(|| {
							path.extension()
								.and_then(|s| s.to_str().map(str::to_string))
								.unwrap_or_default()
						})
						.unwrap_or_default();

					// Only Windows supports normalised files without FS access.
					// For now we only do normalisation for local files.
					let (relative_path, name) = if is_fs {
						crate::path::normalize_path(&path).map_err(|err| {
							io::Error::new(
								ErrorKind::Other,
								format!("Error normalising path '{path:?}': {err:?}"),
							)
						})?
					} else {
						unreachable!();
						// (
						// 	path.file_stem()
						// 		.and_then(|s| s.to_str().map(str::to_string))
						// 		.ok_or_else(|| {
						// 			io::Error::new(
						// 				ErrorKind::Other,
						// 				"error on file '{path:?}: non UTF-8",
						// 			)
						// 		})?
						// 		.to_string(),
						// 	path.to_str()
						// 		.expect("non UTF-8 path - is unreachable")
						// 		.to_string(),
						// )
					};

					let kind = if entry.metadata().is_dir() {
						ObjectKind::Folder
					} else if is_fs {
						Extension::resolve_conflicting(&path, false)
							.await
							.map(Into::into)
							.unwrap_or(ObjectKind::Unknown)
					} else {
						// TODO: Determine kind of remote files - https://linear.app/spacedriveapp/issue/ENG-1718/fix-objectkind-of-remote-files
						ObjectKind::Unknown
					};

					let name = (kind != ObjectKind::Folder)
						.then(|| {
							path.file_stem()
								.and_then(|s| s.to_str().map(str::to_string))
						})
						.flatten()
						.unwrap_or(name);

					let mut path = path
						.to_str()
						.expect("comes from string so this is impossible")
						.to_string();

					// OpenDAL will *always* end in a `/` for directories, we strip it here so we can give the path to Tokio.
					if path.ends_with('/') && path.len() > 1 {
						path.pop();
					}

					let result = IndexerRule::apply_all(rules, &path).await.map_err(|err| {
						io::Error::new(
							ErrorKind::Other,
							format!("Error running indexer rules on file '{path:?}': {err:?}"),
						)
					})?;

					// No OS Protected and No Hidden rules, must always be from this kind, should panic otherwise
					if result[&RuleKind::RejectFilesByGlob]
						.iter()
						.any(|reject| !reject)
					{
						return Ok(None); // Skip this file
					};

					// TODO: OpenDAL last modified time - https://linear.app/spacedriveapp/issue/ENG-1717/fix-modified-time
					// TODO: OpenDAL hidden files - https://linear.app/spacedriveapp/issue/ENG-1720/fix-hidden-files
					let (hidden, date_created, date_modified, size) = if is_fs {
						let metadata = tokio::fs::metadata(&path).await.map_err(|err| {
							io::Error::new(
								ErrorKind::Other,
								format!("Error getting metadata for '{path:?}': {err:?}"),
							)
						})?;

						(
							path_is_hidden(&path, &metadata),
							metadata
								.created()
								.map_err(|err| {
									io::Error::new(
										ErrorKind::Other,
										format!("Error determining created time for '{path:?}': {err:?}"),
									)
								})?
								.into(),
							metadata
								.modified()
								.map_err(|err| {
									io::Error::new(
										ErrorKind::Other,
										format!("Error determining modified time for '{path:?}': {err:?}"),
									)
								})?
								.into(),
							metadata.len(),
						)
					} else {
						(false, Default::default(), Default::default(), 0)
					};

					// TODO: Fix this - https://linear.app/spacedriveapp/issue/ENG-1725/fix-last-modified
					#[allow(clippy::redundant_locals)]
					let date_modified = date_modified;
					// entry.metadata().last_modified().ok_or_else(|| {
					// 	io::Error::new(
					// 		ErrorKind::Other,
					// 		format!("Error getting modified time for '{path:?}'"),
					// 	)
					// })?;

					#[allow(clippy::redundant_locals)]
					// TODO: Fix this - https://linear.app/spacedriveapp/issue/ENG-1726/fix-file-size
					let size = size;

					Ok(Some(NonIndexedPathItem {
						path: relative_path,
						name,
						extension,
						kind: kind as i32,
						is_dir: kind == ObjectKind::Folder,
						date_created,
						date_modified,
						// TODO
						// entry
						// 	.metadata()
						// 	.content_length()
						size_in_bytes_bytes: size.to_be_bytes().to_vec(),
						hidden,
					}))
				})
				.await;

			if tx
				.send(match result {
					Ok(Some(item)) => Ok(item),
					Ok(None) => continue,
					Err(err) => Err(err),
				})
				.await
				.is_err()
			{
				// Stream has been dropped.
				continue;
			}
		}
	}))
}

@@ -1,5 +0,0 @@
mod ephemeral;
pub mod path;
mod stream;

pub use ephemeral::*;

@@ -1,57 +0,0 @@
use std::{
	io,
	path::{Component, Path},
};

use normpath::PathExt;

pub fn normalize_path(path: impl AsRef<Path>) -> io::Result<(String, String)> {
	let mut path = path.as_ref().to_path_buf();
	let (location_path, normalized_path) = path
		// Normalize path and also check if it exists
		.normalize()
		.and_then(|normalized_path| {
			if cfg!(windows) {
				// Use normalized path as main path on Windows
				// This ensures we always receive a valid windows formatted path
				// ex: /Users/JohnDoe/Downloads will become C:\Users\JohnDoe\Downloads
				// Internally `normalize` calls `GetFullPathNameW` on Windows
				// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew
				path = normalized_path.as_path().to_path_buf();
			}

			Ok((
				// TODO: Maybe save the path bytes instead of the string representation to avoid depending on UTF-8
				path.to_str().map(str::to_string).ok_or(io::Error::new(
					io::ErrorKind::InvalidInput,
					"Found non-UTF-8 path",
				))?,
				normalized_path,
			))
		})?;

	// Not needed on Windows because the normalization already handles it
	if cfg!(not(windows)) {
		// Replace location_path with normalize_path, when the first one ends in `.` or `..`
		// This is required so localize_name doesn't panic
		if let Some(component) = path.components().next_back() {
			if matches!(component, Component::CurDir | Component::ParentDir) {
				path = normalized_path.as_path().to_path_buf();
			}
		}
	}

	// Use `to_string_lossy` because a partially corrupted but identifiable name is better than nothing
	let mut name = path.localize_name().to_string_lossy().to_string();

	// Windows doesn't have a root directory
	if cfg!(not(windows)) && name == "/" {
		name = "Root".to_string()
	}

	if name.replace(char::REPLACEMENT_CHARACTER, "") == "" {
		name = "Unknown".to_string()
	}

	Ok((location_path, name))
}

@@ -1,40 +0,0 @@
use std::{
	pin::Pin,
	task::{Context, Poll},
};

use futures_util::Future;
use tokio::sync::mpsc;

/// Construct a stream from a Tokio task.
/// Similar to `tokio_stream::stream!` but not a macro for better DX.
pub struct TaskStream<T> {
	task: tokio::task::JoinHandle<()>,
	receiver: mpsc::Receiver<T>,
}

impl<T: Send + 'static> TaskStream<T> {
	pub fn new<F: Future + Send>(task: impl FnOnce(mpsc::Sender<T>) -> F + Send + 'static) -> Self {
		let (tx, rx) = mpsc::channel(256);
		Self {
			task: tokio::spawn(async move {
				task(tx).await;
			}),
			receiver: rx,
		}
	}
}

impl<T> futures_util::Stream for TaskStream<T> {
	type Item = T;

	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
		self.receiver.poll_recv(cx)
	}
}

impl<T> Drop for TaskStream<T> {
	fn drop(&mut self) {
		self.task.abort();
	}
}

@@ -7,10 +7,8 @@ use crate::{CRDTOperation, CRDTOperationData};
pub type CompressedCRDTOperationsForModel = Vec<(rmpv::Value, Vec<CompressedCRDTOperation>)>;

/// Stores a bunch of CRDTOperations in a more memory-efficient form for sending to the cloud.
#[derive(Serialize, Deserialize)]
pub struct CompressedCRDTOperations(
	pub(self) Vec<(Uuid, Vec<(u16, CompressedCRDTOperationsForModel)>)>,
);
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub struct CompressedCRDTOperations(pub Vec<(Uuid, Vec<(u16, CompressedCRDTOperationsForModel)>)>);

impl CompressedCRDTOperations {
	pub fn new(ops: Vec<CRDTOperation>) -> Self {

@@ -25,7 +23,7 @@ impl CompressedCRDTOperations {
		let mut instance_id = first.instance;
		let mut instance = vec![];

		let mut model_str = first.model.clone();
		let mut model_str = first.model;
		let mut model = vec![];

		let mut record_id = first.record_id.clone();

@@ -38,7 +36,7 @@ impl CompressedCRDTOperations {
					std::mem::take(&mut record),
				));
				instance.push((
					std::mem::replace(&mut model_str, op.model.clone()),
					std::mem::replace(&mut model_str, op.model),
					std::mem::take(&mut model),
				));
				compressed.push((

@@ -51,7 +49,7 @@ impl CompressedCRDTOperations {
					std::mem::take(&mut record),
				));
				instance.push((
					std::mem::replace(&mut model_str, op.model.clone()),
					std::mem::replace(&mut model_str, op.model),
					std::mem::take(&mut model),
				));
			} else if record_id != op.record_id {

@@ -71,6 +69,35 @@ impl CompressedCRDTOperations {
		Self(compressed)
	}

	pub fn first(&self) -> Option<(Uuid, u16, &rmpv::Value, &CompressedCRDTOperation)> {
		self.0.first().and_then(|(instance, data)| {
			data.first().and_then(|(model, data)| {
				data.first()
					.and_then(|(record, ops)| ops.first().map(|op| (*instance, *model, record, op)))
			})
		})
	}

	pub fn last(&self) -> Option<(Uuid, u16, &rmpv::Value, &CompressedCRDTOperation)> {
		self.0.last().and_then(|(instance, data)| {
			data.last().and_then(|(model, data)| {
				data.last()
					.and_then(|(record, ops)| ops.last().map(|op| (*instance, *model, record, op)))
			})
		})
	}

	pub fn len(&self) -> usize {
		self.0
			.iter()
			.map(|(_, data)| {
				data.iter()
					.map(|(_, data)| data.iter().map(|(_, ops)| ops.len()).sum::<usize>())
					.sum::<usize>()
			})
			.sum::<usize>()
	}

	pub fn into_ops(self) -> Vec<CRDTOperation> {
		let mut ops = vec![];

@@ -80,7 +107,7 @@ impl CompressedCRDTOperations {
				for op in record {
					ops.push(CRDTOperation {
						instance: instance_id,
						model: model_str.clone(),
						model: model_str,
						record_id: record_id.clone(),
						timestamp: op.timestamp,
						data: op.data,

@@ -94,7 +121,7 @@ impl CompressedCRDTOperations {
	}
}

#[derive(PartialEq, Serialize, Deserialize, Clone)]
#[derive(PartialEq, Serialize, Deserialize, Clone, Debug)]
pub struct CompressedCRDTOperation {
	pub timestamp: NTP64,
	pub data: CRDTOperationData,

@@ -13,8 +13,7 @@ use async_channel as chan;
use async_trait::async_trait;
use chan::{Recv, RecvError};
use downcast_rs::{impl_downcast, Downcast};
use futures::executor::block_on;
use tokio::sync::oneshot;
use tokio::{runtime::Handle, sync::oneshot};
use tracing::{trace, warn};
use uuid::Uuid;

@@ -58,6 +57,12 @@ pub enum TaskOutput {
	Empty,
}

impl From<()> for TaskOutput {
	fn from((): ()) -> Self {
		Self::Empty
	}
}

/// An enum representing all possible outcomes for a task.
#[derive(Debug)]
pub enum TaskStatus<E: RunError> {

@@ -125,14 +130,8 @@ impl<T: Task<E> + 'static, E: RunError> IntoTask<E> for T {
/// due to a limitation in the Rust language.
#[async_trait]
pub trait Task<E: RunError>: fmt::Debug + Downcast + Send + Sync + 'static {
	/// This method represents the work that should be done by the worker; it will be called by the
	/// worker when there is a slot available in its internal queue.
	/// We receive a `&mut self` so any internal data can be mutated on each `run` invocation.
	///
	/// The [`interrupter`](Interrupter) is a helper object that can be used to check if the user requested a pause or a cancel,
	/// so the user can decide the appropriate moment to pause or cancel the task, avoiding corrupted data or
	/// inconsistent states.
	async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, E>;
	/// A unique identifier for the task; it will be used to identify the task on the system and also to the user.
	fn id(&self) -> TaskId;

	/// This method defines whether a task should run with priority or not. The task system has a mechanism
	/// to suspend non-priority tasks on any worker and run priority tasks ASAP. This is useful for tasks that

@@ -142,8 +141,14 @@ pub trait Task<E: RunError>: fmt::Debug + Downcast + Send + Sync + 'static {
		false
	}

	/// A unique identifier for the task; it will be used to identify the task on the system and also to the user.
	fn id(&self) -> TaskId;
	/// This method represents the work that should be done by the worker; it will be called by the
	/// worker when there is a slot available in its internal queue.
	/// We receive a `&mut self` so any internal data can be mutated on each `run` invocation.
	///
	/// The [`interrupter`](Interrupter) is a helper object that can be used to check if the user requested a pause or a cancel,
	/// so the user can decide the appropriate moment to pause or cancel the task, avoiding corrupted data or
	/// inconsistent states.
	async fn run(&mut self, interrupter: &Interrupter) -> Result<ExecStatus, E>;
}

impl_downcast!(Task<E> where E: RunError);

@@ -508,7 +513,7 @@ impl<E: RunError> Future for CancelTaskOnDrop<E> {
impl<E: RunError> Drop for CancelTaskOnDrop<E> {
	fn drop(&mut self) {
		// FIXME: We should use async drop when it becomes stable
		block_on(self.0.cancel());
		Handle::current().block_on(self.0.cancel());
	}
}

@@ -17,8 +17,8 @@ This means the queries will always render the newest version of the model.

## Terminology

- `CacheNode`: A node in the cache - this contains the data and can be identified by the model's name and unique ID within the data (eg. database primary key).
- `Reference<T>`: A reference to a node in the cache - This contains the model's name and unique ID.
- `CacheNode`: A node in the cache - this contains the data and can be identified by the model's name and unique ID within the data (eg. database primary key).
- `Reference<T>`: A reference to a node in the cache - This contains the model's name and unique ID.

## High level overview

@@ -26,8 +26,7 @@ We turn the data on the backend into a list of `CacheNode`'s and a list of `Refe

We insert the `CacheNode`'s into a global cache on the frontend and then use the `Reference<T>`'s to reconstruct the data by looking up the `CacheNode`'s.

When the cache changes (from another query, invalidation, etc), we can reconstruct *all* queries using their `Reference<T>`'s to reflect the updated data.

When the cache changes (from another query, invalidation, etc), we can reconstruct _all_ queries using their `Reference<T>`'s to reflect the updated data.

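To make the overview concrete, here is a minimal sketch of that reconstruction step. The shapes below (`__type`, `__id`) are placeholders for illustration only, not the actual Spacedrive field names:

```ts
// Hypothetical node/reference shapes - the real CacheNode/Reference fields differ.
type CacheNode = { __type: string; __id: string } & Record<string, unknown>;
type Reference<T> = { __type: string; __id: string };

const cache = new Map<string, CacheNode>();

// Insert the nodes a query returned into the global cache.
function insertNodes(nodes: CacheNode[]) {
	for (const node of nodes) cache.set(`${node.__type}:${node.__id}`, node);
}

// Rebuild a query's data by resolving its references against the cache.
function resolve<T>(refs: Reference<T>[]): T[] {
	return refs.map((ref) => cache.get(`${ref.__type}:${ref.__id}`) as T);
}
```

Because every query holds references rather than copies, re-running `resolve` after a cache update is all it takes to refresh every query at once.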
## Rust usage

@@ -129,7 +128,6 @@ const filePaths = useCache(query.data?.file_paths);

This is only possible because `useNodes` and `useCache` take in a specific key, instead of the whole `data` object, so you can tell it where to look.

## Known issues

### Specta support

@@ -7,16 +7,17 @@ We use a fork based on [rspc 0.1.4](https://docs.rs/rspc) which contains heavy m

## What's different?

- A super pre-release version of rspc v1's procedure syntax.
- Upgrade to Specta v2 prerelease
- Add `Router::sd_patch_types_dangerously`
- Expose internal type maps for the invalidation system.
- All procedures must return a result
- `Procedure::with2` which is a hack to properly support the middleware mapper API
- Legacy executor system - Will require major changes to the React Native link.
- A super pre-release version of rspc v1's procedure syntax.
- Upgrade to Specta v2 prerelease
- Add `Router::sd_patch_types_dangerously`
- Expose internal type maps for the invalidation system.
- All procedures must return a result
- `Procedure::with2` which is a hack to properly support the middleware mapper API
- Legacy executor system - Will require major changes to the React Native link.

Removed features relied on by Spacedrive:

- Argument middleware mapper API has been removed upstream

- Argument middleware mapper API has been removed upstream

## Basic usage

@@ -83,9 +84,8 @@ Minus batching HTTP requests are run in parallel.

### Websocket reconnect

If the websocket connection is dropped (due to network disruption) all subscriptions *will not* restart upon reconnecting.
If the websocket connection is dropped (due to network disruption) all subscriptions _will not_ restart upon reconnecting.

This will cause the invalidation system to break and potentially other parts of the app that rely on subscriptions.

Queries and mutations done during the network disruption will hang indefinitely.

@@ -36,7 +36,7 @@ You can run Spacedrive in a Docker container using the following command.
/>

```bash
docker run -d --name spacedrive -p 8080:8080 -e SD_AUTH=admin,spacedrive -v /var/spacedrive:/var/spacedrive ghcr.io/spacedriveapp/spacedrive/server
docker run -d --name spacedrive -p 8080:8080 -e SD_AUTH=admin:spacedrive -v /var/spacedrive:/var/spacedrive ghcr.io/spacedriveapp/spacedrive/server
```

#### Authentication

@@ -44,9 +44,10 @@ docker run -d --name spacedrive -p 8080:8080 -e SD_AUTH=admin,spacedrive -v /var
When using the Spacedrive server you can use the `SD_AUTH` environment variable to configure authentication.

Valid values:
- `SD_AUTH=disabled` - Disables authentication.
- `SD_AUTH=username:password` - Enables authentication for a single user.
- `SD_AUTH=username:password,username1:password1` - Enables authentication with multiple users (you can add as many users as you want).

- `SD_AUTH=disabled` - Disables authentication.
- `SD_AUTH=username:password` - Enables authentication for a single user.
- `SD_AUTH=username:password,username1:password1` - Enables authentication with multiple users (you can add as many users as you want).

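For example, combining the multi-user form with the run command above (the usernames and passwords here are placeholders, not defaults):

```bash
docker run -d --name spacedrive -p 8080:8080 \
  -e SD_AUTH=alice:hunter2,bob:swordfish \
  -v /var/spacedrive:/var/spacedrive ghcr.io/spacedriveapp/spacedrive/server
```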
### Mobile (Preview)

@@ -17,7 +17,7 @@ import { usePlatform } from '~/util/Platform';

import { useExplorerContext } from '../Context';
import { explorerStore } from '../store';
import { ExplorerItemData } from '../util';
import { ExplorerItemData } from '../useExplorerItemData';
import { Image } from './Image';
import { useBlackBars, useSize } from './utils';

@@ -15,7 +15,7 @@ import { useIsDark } from '~/hooks';
import { pdfViewerEnabled } from '~/util/pdfViewer';
import { usePlatform } from '~/util/Platform';

import { useExplorerItemData } from '../util';
import { useExplorerItemData } from '../useExplorerItemData';
import { Image, ImageProps } from './Image';
import LayeredFileIcon from './LayeredFileIcon';
import { Original } from './Original';

@@ -55,7 +55,8 @@ import AssignTagMenuItems from '../ContextMenu/AssignTagMenuItems';
import { FileThumb } from '../FilePath/Thumb';
import { useQuickPreviewStore } from '../QuickPreview/store';
import { explorerStore } from '../store';
import { uniqueId, useExplorerItemData } from '../util';
import { useExplorerItemData } from '../useExplorerItemData';
import { uniqueId } from '../util';
import { RenamableItemText } from '../View/RenamableItemText';
import FavoriteButton from './FavoriteButton';
import MediaData from './MediaData';

@@ -15,8 +15,8 @@ export const IconSize = () => {
	const explorer = useExplorerContext();
	const settings = explorer.useSettingsSnapshot();

	const defaultValue = useMemo(
		() => sizes.findIndex((size) => size[0] === settings.listViewIconSize),
	const value = useMemo(
		() => sizes.indexMap.get(settings.listViewIconSize),
		[settings.listViewIconSize]
	);

@@ -25,11 +25,11 @@ export const IconSize = () => {
			<Subheading>{t('icon_size')}</Subheading>
			<Slider
				step={1}
				max={sizes.length - 1}
				defaultValue={[defaultValue]}
				max={sizes.sizeMap.size - 1}
				value={[value ?? 0]}
				onValueChange={([value]) => {
					const size = value !== undefined && sizes[value];
					if (size) explorer.settingsStore.listViewIconSize = size[0];
					const size = value !== undefined && sizes.sizeMap.get(value);
					if (size) explorer.settingsStore.listViewIconSize = size;
				}}
			/>
		</div>

@@ -16,7 +16,7 @@ export const TextSize = () => {
	const settings = explorer.useSettingsSnapshot();

	const defaultValue = useMemo(
		() => sizes.findIndex((size) => size[0] === settings.listViewTextSize),
		() => sizes.indexMap.get(settings.listViewTextSize),
		[settings.listViewTextSize]
	);

@@ -25,11 +25,11 @@ export const TextSize = () => {
			<Subheading>{t('text_size')}</Subheading>
			<Slider
				step={1}
				max={sizes.length - 1}
				defaultValue={[defaultValue]}
				max={sizes.sizeMap.size - 1}
				defaultValue={[defaultValue ?? 0]}
				onValueChange={([value]) => {
					const size = value !== undefined && sizes[value];
					if (size) explorer.settingsStore.listViewTextSize = size[0];
					const size = value !== undefined && sizes.sizeMap.get(value);
					if (size) explorer.settingsStore.listViewTextSize = size;
				}}
			/>
		</div>

@@ -1,3 +1,18 @@
export function getSizes<T extends { [key: string]: number }>(sizes: T) {
	return (Object.entries(sizes) as [keyof T, T[keyof T]][]).sort((a, b) => a[1] - b[1]);
	const sizesArr = (Object.entries(sizes) as [keyof T, T[keyof T]][]).sort((a, b) => a[1] - b[1]);

	// Map of size to index
	const indexMap = new Map<keyof T, number>();

	// Map of index to size
	const sizeMap = new Map<number, keyof T>();

	for (let i = 0; i < sizesArr.length; i++) {
		const size = sizesArr[i];
		if (!size) continue;
		indexMap.set(size[0], i);
		sizeMap.set(i, size[0]);
	}

	return { indexMap, sizeMap };
}

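As a quick illustration of how the two maps returned by `getSizes` are meant to be consumed (the size table below is made up for the example):

```ts
const sizes = getSizes({ sm: 10, md: 15, lg: 20 });

// Slider index -> size key, used when the user drags the slider.
sizes.sizeMap.get(1); // 'md'

// Persisted size key -> slider index, used to position the slider.
sizes.indexMap.get('md'); // 1
```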
@@ -108,7 +108,7 @@ export default () => {
	onValueChange={(value) => {
		explorer.settingsStore.gridItemSize = value[0] || 100;
	}}
	defaultValue={[settings.gridItemSize]}
	value={[settings.gridItemSize]}
	max={200}
	step={10}
	min={60}

@@ -50,6 +50,7 @@ import { Conditional } from '../ContextMenu/ConditionalItem';
import { FileThumb } from '../FilePath/Thumb';
import { SingleItemMetadata } from '../Inspector';
import { explorerStore } from '../store';
import { useExplorerViewContext } from '../View/Context';
import { ImageSlider } from './ImageSlider';
import { getQuickPreviewStore, useQuickPreviewStore } from './store';

@@ -76,6 +77,7 @@ export const QuickPreview = () => {
	const { openFilePaths, openEphemeralFiles } = usePlatform();
	const explorerLayoutStore = useExplorerLayoutStore();
	const explorer = useExplorerContext();
	const explorerView = useExplorerViewContext();
	const { open, itemIndex } = useQuickPreviewStore();

	const thumb = createRef<HTMLDivElement>();

@@ -159,6 +161,14 @@ export const QuickPreview = () => {
		setShowMetadata(false);
	}, [item, open]);

	useEffect(() => {
		if (open) explorerView.updateActiveItem(null, { updateFirstItem: true });

		// "open" is excluded, as we only want this to trigger when hashes change,
		// that way we don't have to manually update the active item.
		// eslint-disable-next-line react-hooks/exhaustive-deps
	}, [explorer.selectedItemHashes, explorerView.updateActiveItem]);

	const handleMoveBetweenItems = (step: number) => {
		const nextPreviewItem = items[itemIndex + step];
		if (nextPreviewItem) {

Some files were not shown because too many files have changed in this diff.