From 338766a2c56535bf080c53c6c86a37c574456486 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:35:25 +0200 Subject: [PATCH 01/22] worktree: Add a binary for benchmarking the initial worktree scan of a project Co-authored-by: Smit Barmase --- crates/worktree/Cargo.toml | 4 ++ crates/worktree/bin/bench_background_scan.rs | 47 ++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 crates/worktree/bin/bench_background_scan.rs diff --git a/crates/worktree/Cargo.toml b/crates/worktree/Cargo.toml index fdeca37b7ac737..69aef5577c0d7d 100644 --- a/crates/worktree/Cargo.toml +++ b/crates/worktree/Cargo.toml @@ -9,6 +9,10 @@ license = "GPL-3.0-or-later" path = "src/worktree.rs" doctest = false +[[bin]] +name = "bench_background_scan" +path = "bin/bench_background_scan.rs" + [lints] workspace = true diff --git a/crates/worktree/bin/bench_background_scan.rs b/crates/worktree/bin/bench_background_scan.rs new file mode 100644 index 00000000000000..8f789fd6162444 --- /dev/null +++ b/crates/worktree/bin/bench_background_scan.rs @@ -0,0 +1,47 @@ +use std::{ + path::Path, + sync::{Arc, atomic::AtomicUsize}, +}; + +use fs::RealFs; +use gpui::Application; +use settings::Settings; +use worktree::{Worktree, WorktreeSettings}; + +fn main() { + let Some(worktree_root_path) = std::env::args().nth(1) else { + println!( + "Missing path to worktree root\nUsage: bench_background_scan PATH_TO_WORKTREE_ROOT" + ); + return; + }; + let app = Application::headless(); + + app.run(|cx| { + settings::init(cx); + WorktreeSettings::register(cx); + let fs = Arc::new(RealFs::new(None, cx.background_executor().clone())); + + cx.spawn(async move |cx| { + let worktree = Worktree::local( + Path::new(&worktree_root_path), + true, + fs, + Arc::new(AtomicUsize::new(0)), + cx, + ) + .await + .expect("Worktree initialization to succeed"); + let did_finish_scan = worktree + .update(cx, |this, _| this.as_local().unwrap().scan_complete()) + .unwrap(); + let start = std::time::Instant::now(); + did_finish_scan.await; + println!("{:?}", start.elapsed()); + cx.update(|cx| { + cx.quit(); + }) + }) + .detach(); + }) +} From 547e62718b269184aafb5774862c32d69c53db6e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 13:07:43 +0200 Subject: [PATCH 02/22] Add file/dir count to benchmarks --- crates/worktree/bin/bench_background_scan.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/worktree/bin/bench_background_scan.rs b/crates/worktree/bin/bench_background_scan.rs index 8f789fd6162444..ca86687aff2d4f 100644 --- a/crates/worktree/bin/bench_background_scan.rs +++ b/crates/worktree/bin/bench_background_scan.rs @@ -37,7 +37,14 @@ fn main() { .unwrap(); let start = std::time::Instant::now(); did_finish_scan.await; - println!("{:?}", start.elapsed()); + let elapsed = start.elapsed(); + let (files, directories) = worktree + .read_with(cx, |this, _| (this.file_count(), this.dir_count())) + .unwrap(); + println!( + "{:?} for {directories} directories and {files} files", + elapsed + ); cx.update(|cx| { cx.quit(); }) From 78cf23a80c436b6a9c3ac58ad3b8622fc1c5c89e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 18:56:06 +0200 Subject: [PATCH 03/22] Add simple benchmarks for fs operations --- Cargo.lock | 9 +++++++++ Cargo.toml | 2 +- crates/fs_benchmarks/Cargo.toml | 13 +++++++++++++ crates/fs_benchmarks/src/main.rs | 
32 ++++++++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 crates/fs_benchmarks/Cargo.toml create mode 100644 crates/fs_benchmarks/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 35c3c72be7561d..561ff3ec17d2b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6528,6 +6528,15 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "fs_benchmarks" +version = "0.1.0" +dependencies = [ + "fs", + "gpui", + "workspace-hack", +] + [[package]] name = "fs_extra" version = "1.3.0" diff --git a/Cargo.toml b/Cargo.toml index 3e4d3d18483583..1229a4029a06f7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -222,7 +222,7 @@ members = [ "tooling/perf", "tooling/workspace-hack", - "tooling/xtask", + "tooling/xtask", "crates/fs_benchmarks", ] default-members = ["crates/zed"] diff --git a/crates/fs_benchmarks/Cargo.toml b/crates/fs_benchmarks/Cargo.toml new file mode 100644 index 00000000000000..2372db36c89428 --- /dev/null +++ b/crates/fs_benchmarks/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "fs_benchmarks" +version = "0.1.0" +publish.workspace = true +edition.workspace = true + +[dependencies] +fs.workspace = true +gpui = {workspace = true, features = ["windows-manifest"]} +workspace-hack.workspace = true + +[lints] +workspace = true diff --git a/crates/fs_benchmarks/src/main.rs b/crates/fs_benchmarks/src/main.rs new file mode 100644 index 00000000000000..12df32f0763e02 --- /dev/null +++ b/crates/fs_benchmarks/src/main.rs @@ -0,0 +1,32 @@ +use fs::Fs; +use gpui::{AppContext, Application}; +fn main() { + let Some(path_to_read) = std::env::args().nth(1) else { + println!("Expected path to read as 1st argument."); + return; + }; + + let _ = Application::headless().run(|cx| { + let fs = fs::RealFs::new(None, cx.background_executor().clone()); + cx.background_spawn(async move { + let timer = std::time::Instant::now(); + let result = fs.load_bytes(path_to_read.as_ref()).await; + let elapsed = timer.elapsed(); + if let Err(e) = result { + println!("Failed `load_bytes` after {elapsed:?} with error `{e}`"); + } else { + println!("Took {elapsed:?} to read {} bytes", result.unwrap().len()); + }; + let timer = std::time::Instant::now(); + let result = fs.metadata(path_to_read.as_ref()).await; + let elapsed = timer.elapsed(); + if let Err(e) = result { + println!("Failed `metadata` after {elapsed:?} with error `{e}`"); + } else { + println!("Took {elapsed:?} to query metadata"); + }; + std::process::exit(0); + }) + .detach(); + }); +} From dc5eb24e3d5c6f5cced6f354ad17ff030a6b399b Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 20:33:27 +0200 Subject: [PATCH 04/22] Move worktree benchmark into a separate crate --- Cargo.lock | 10 ++++++++++ Cargo.toml | 2 +- crates/worktree/Cargo.toml | 4 ---- crates/worktree_benchmarks/Cargo.toml | 14 ++++++++++++++ .../src/main.rs} | 0 5 files changed, 25 insertions(+), 5 deletions(-) create mode 100644 crates/worktree_benchmarks/Cargo.toml rename crates/{worktree/bin/bench_background_scan.rs => worktree_benchmarks/src/main.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 561ff3ec17d2b8..6d353f65670214 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20888,6 +20888,16 @@ dependencies = [ "zlog", ] +[[package]] +name = "worktree_benchmarks" +version = "0.1.0" +dependencies = [ + "fs", + "gpui", + "settings", + "worktree", +] + [[package]] name = "write16" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index 1229a4029a06f7..99f06f4ba42d11 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -222,7 +222,7 @@ members = [ "tooling/perf", "tooling/workspace-hack", - "tooling/xtask", "crates/fs_benchmarks", + "tooling/xtask", "crates/fs_benchmarks", "crates/worktree_benchmarks", ] default-members = ["crates/zed"] diff --git a/crates/worktree/Cargo.toml b/crates/worktree/Cargo.toml index 69aef5577c0d7d..fdeca37b7ac737 100644 --- a/crates/worktree/Cargo.toml +++ b/crates/worktree/Cargo.toml @@ -9,10 +9,6 @@ license = "GPL-3.0-or-later" path = "src/worktree.rs" doctest = false -[[bin]] -name = "bench_background_scan" -path = "bin/bench_background_scan.rs" - [lints] workspace = true diff --git a/crates/worktree_benchmarks/Cargo.toml b/crates/worktree_benchmarks/Cargo.toml new file mode 100644 index 00000000000000..29681573adc9da --- /dev/null +++ b/crates/worktree_benchmarks/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "worktree_benchmarks" +version = "0.1.0" +publish.workspace = true +edition.workspace = true + +[dependencies] +fs.workspace = true +gpui = { workspace = true, features = ["windows-manifest"] } +settings.workspace = true +worktree.workspace = true + +[lints] +workspace = true diff --git a/crates/worktree/bin/bench_background_scan.rs b/crates/worktree_benchmarks/src/main.rs similarity index 100% rename from crates/worktree/bin/bench_background_scan.rs rename to crates/worktree_benchmarks/src/main.rs From fcb243273a3e61030872e33e6e6e8a9b85f2473e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 13 Oct 2025 18:14:01 +0200 Subject: [PATCH 05/22] fs: Replace smol in RealFs::load with background executor --- crates/fs/src/fs.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 81483ce56ce270..1b99a00957f42a 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -562,7 +562,10 @@ impl Fs for RealFs { async fn load(&self, path: &Path) -> Result { let path = path.to_path_buf(); - let text = smol::unblock(|| std::fs::read_to_string(path)).await?; + let text = self + .executor + .spawn(async move { std::fs::read_to_string(path) }) + .await?; Ok(text) } async fn load_bytes(&self, path: &Path) -> Result> { From 5ea65bfe936765ed292e6e7878fe61fc67800e8e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 13 Oct 2025 21:51:49 +0200 Subject: [PATCH 06/22] Fix a bunch of tests --- crates/fs/src/fs.rs | 10 ++++---- crates/worktree/src/worktree.rs | 35 ++++++++++++++++++--------- crates/worktree/src/worktree_tests.rs | 3 +-- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 1b99a00957f42a..0ca116ecb6d472 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -11,6 +11,7 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use gpui::ReadGlobal as _; +use gpui::Task; use std::borrow::Cow; use util::command::new_smol_command; @@ -562,12 +563,11 @@ impl Fs for RealFs { async fn load(&self, path: &Path) -> Result { let path = path.to_path_buf(); - let text = self - .executor - .spawn(async move { std::fs::read_to_string(path) }) - .await?; - Ok(text) + self.executor + .spawn(async move { Ok(std::fs::read_to_string(path)?) 
}) + .await } + async fn load_bytes(&self, path: &Path) -> Result> { let path = path.to_path_buf(); let bytes = smol::unblock(|| std::fs::read(path)).await?; diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 003aeb133b2056..7eaf29877bb1f9 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -226,7 +226,7 @@ impl Default for WorkDirectory { } } -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct LocalSnapshot { snapshot: Snapshot, global_gitignore: Option>, @@ -239,6 +239,7 @@ pub struct LocalSnapshot { /// The file handle of the worktree root. `None` if the worktree is a directory. /// (so we can find it after it's been moved) root_file_handle: Option>, + executor: BackgroundExecutor, } struct BackgroundScannerState { @@ -321,7 +322,6 @@ impl DerefMut for LocalSnapshot { } } -#[derive(Debug)] enum ScanState { Started, Updated { @@ -402,6 +402,7 @@ impl Worktree { PathStyle::local(), ), root_file_handle, + executor: cx.background_executor().clone(), }; let worktree_id = snapshot.id(); @@ -2442,7 +2443,7 @@ impl LocalSnapshot { log::trace!("insert entry {:?}", entry.path); if entry.is_file() && entry.path.file_name() == Some(&GITIGNORE) { let abs_path = self.absolutize(&entry.path); - match smol::block_on(build_gitignore(&abs_path, fs)) { + match self.executor.block(build_gitignore(&abs_path, fs)) { Ok(ignore) => { self.ignores_by_parent_abs_path .insert(abs_path.parent().unwrap().into(), (Arc::new(ignore), true)); @@ -2504,7 +2505,10 @@ impl LocalSnapshot { new_ignores.push((ancestor, None)); } } - let metadata = smol::block_on(fs.metadata(&ancestor.join(DOT_GIT))) + + let metadata = self + .executor + .block(fs.metadata(&ancestor.join(DOT_GIT))) .ok() .flatten(); if metadata.is_some() { @@ -2899,7 +2903,7 @@ impl BackgroundScannerState { let work_directory_abs_path = self.snapshot.work_directory_abs_path(&work_directory); let (repository_dir_abs_path, common_dir_abs_path) = - discover_git_paths(&dot_git_abs_path, fs); + discover_git_paths(&dot_git_abs_path, fs, &self.snapshot.executor); watcher .add(&common_dir_abs_path) .context("failed to add common directory to watcher") @@ -3819,7 +3823,7 @@ impl BackgroundScanner { let mut is_git_related = false; let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| { - if smol::block_on(is_git_dir(ancestor, self.fs.as_ref())) { + if snapshot.executor.block(is_git_dir(ancestor, self.fs.as_ref())) { let path_in_git_dir = abs_path .as_path() .strip_prefix(ancestor) @@ -4606,7 +4610,9 @@ impl BackgroundScanner { return; }; - if let Ok(Some(metadata)) = smol::block_on(self.fs.metadata(&job.abs_path.join(DOT_GIT))) + if let Ok(Some(metadata)) = self + .executor + .block(self.fs.metadata(&job.abs_path.join(DOT_GIT))) && metadata.is_dir { ignore_stack.repo_root = Some(job.abs_path.clone()); @@ -4739,7 +4745,8 @@ impl BackgroundScanner { if exists_in_snapshot || matches!( - smol::block_on(self.fs.metadata(&entry.common_dir_abs_path)), + self.executor + .block(self.fs.metadata(&entry.common_dir_abs_path)), Ok(Some(_)) ) { @@ -5498,11 +5505,16 @@ fn parse_gitfile(content: &str) -> anyhow::Result<&Path> { Ok(Path::new(path.trim())) } -fn discover_git_paths(dot_git_abs_path: &Arc, fs: &dyn Fs) -> (Arc, Arc) { +fn discover_git_paths( + dot_git_abs_path: &Arc, + fs: &dyn Fs, + executor: &BackgroundExecutor, +) -> (Arc, Arc) { let mut repository_dir_abs_path = dot_git_abs_path.clone(); let mut common_dir_abs_path = dot_git_abs_path.clone(); - if let Some(path) = 
smol::block_on(fs.load(dot_git_abs_path)) + if let Some(path) = executor + .block(fs.load(dot_git_abs_path)) .ok() .as_ref() .and_then(|contents| parse_gitfile(contents).log_err()) @@ -5511,7 +5523,7 @@ fn discover_git_paths(dot_git_abs_path: &Arc, fs: &dyn Fs) -> (Arc, .parent() .unwrap_or(Path::new("")) .join(path); - if let Some(path) = smol::block_on(fs.canonicalize(&path)).log_err() { + if let Some(path) = executor.block(fs.canonicalize(&path)).log_err() { repository_dir_abs_path = Path::new(&path).into(); common_dir_abs_path = repository_dir_abs_path.clone(); if let Some(commondir_contents) = smol::block_on(fs.load(&path.join("commondir"))).ok() @@ -5522,6 +5534,5 @@ fn discover_git_paths(dot_git_abs_path: &Arc, fs: &dyn Fs) -> (Arc, } } }; - (repository_dir_abs_path, common_dir_abs_path) } diff --git a/crates/worktree/src/worktree_tests.rs b/crates/worktree/src/worktree_tests.rs index 3c39d5c3ad7056..d89e1ef4e4df7d 100644 --- a/crates/worktree/src/worktree_tests.rs +++ b/crates/worktree/src/worktree_tests.rs @@ -734,7 +734,6 @@ async fn test_write_file(cx: &mut TestAppContext) { }) .await .unwrap(); - worktree.read_with(cx, |tree, _| { let tracked = tree .entry_for_path(rel_path("tracked-dir/file.txt")) @@ -1537,7 +1536,7 @@ async fn test_random_worktree_operations_during_initial_scan( assert_eq!( updated_snapshot.entries(true, 0).collect::>(), final_snapshot.entries(true, 0).collect::>(), - "wrong updates after snapshot {i}: {snapshot:#?} {updates:#?}", + "wrong updates after snapshot {i}: {updates:#?}", ); } } From 058d5b7f78f3a503c145275461b1571f2802054d Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 13 Oct 2025 23:34:46 +0200 Subject: [PATCH 07/22] Move worktree background scanner to use async locks --- Cargo.lock | 28 ++++--- Cargo.toml | 1 + crates/fs/src/fs.rs | 5 +- crates/worktree/Cargo.toml | 1 + crates/worktree/src/worktree.rs | 136 ++++++++++++++++++-------------- 5 files changed, 101 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d353f65670214..7c8c16ce12e997 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1166,7 +1166,7 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09f7e37c0ed80b2a977691c47dae8625cfb21e205827106c64f7c588766b2e50" dependencies = [ - "async-lock", + "async-lock 3.4.1", "blocking", "futures-lite 2.6.0", ] @@ -1180,7 +1180,7 @@ dependencies = [ "async-channel 2.3.1", "async-executor", "async-io", - "async-lock", + "async-lock 3.4.1", "blocking", "futures-lite 2.6.0", "once_cell", @@ -1192,7 +1192,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" dependencies = [ - "async-lock", + "async-lock 3.4.1", "cfg-if", "concurrent-queue", "futures-io", @@ -1204,6 +1204,15 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + [[package]] name = "async-lock" version = "3.4.1" @@ -1243,7 +1252,7 @@ checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", "async-io", - "async-lock", + "async-lock 3.4.1", "async-signal", "async-task", "blocking", @@ -1272,7 +1281,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ "async-io", - "async-lock", + "async-lock 3.4.1", "atomic-waker", "cfg-if", "futures-core", @@ -1293,7 +1302,7 @@ dependencies = [ "async-channel 1.9.0", "async-global-executor", "async-io", - "async-lock", + "async-lock 3.4.1", "async-process", "crossbeam-utils", "futures-channel", @@ -11043,7 +11052,7 @@ dependencies = [ "ashpd 0.12.0", "async-fs", "async-io", - "async-lock", + "async-lock 3.4.1", "blocking", "cbc", "cipher", @@ -15744,7 +15753,7 @@ dependencies = [ "async-executor", "async-fs", "async-io", - "async-lock", + "async-lock 3.4.1", "async-net", "async-process", "blocking", @@ -20858,6 +20867,7 @@ name = "worktree" version = "0.1.0" dependencies = [ "anyhow", + "async-lock 2.8.0", "clock", "collections", "fs", @@ -21169,7 +21179,7 @@ dependencies = [ "async-broadcast", "async-executor", "async-io", - "async-lock", + "async-lock 3.4.1", "async-process", "async-recursion", "async-task", diff --git a/Cargo.toml b/Cargo.toml index 99f06f4ba42d11..5482e20a1b2230 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -455,6 +455,7 @@ async-compat = "0.2.1" async-compression = { version = "0.4", features = ["gzip", "futures-io"] } async-dispatcher = "0.1" async-fs = "2.1" +async-lock = "2.1" async-pipe = { git = "https://github.com/zed-industries/async-pipe-rs", rev = "82d00a04211cf4e1236029aa03e6b6ce2a74c553" } async-recursion = "1.0.0" async-tar = "0.5.0" diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 0ca116ecb6d472..28e224d35da8e2 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -570,7 +570,10 @@ impl Fs for RealFs { async fn load_bytes(&self, path: &Path) -> Result> { let path = path.to_path_buf(); - let bytes = smol::unblock(|| std::fs::read(path)).await?; + let bytes = self + .executor + .spawn(async move { std::fs::read(path) }) + .await?; Ok(bytes) } diff --git a/crates/worktree/Cargo.toml b/crates/worktree/Cargo.toml index fdeca37b7ac737..611091c11ff724 100644 --- a/crates/worktree/Cargo.toml +++ b/crates/worktree/Cargo.toml @@ -24,6 +24,7 @@ test-support = [ [dependencies] anyhow.workspace = true +async-lock.workspace = true clock.workspace = true collections.workspace = true fs.workspace = true diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 7eaf29877bb1f9..1e3404c9096aa0 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -64,7 +64,7 @@ use std::{ use sum_tree::{Bias, Dimensions, Edit, KeyedItem, SeekTarget, SumTree, Summary, TreeMap, TreeSet}; use text::{LineEnding, Rope}; use util::{ - ResultExt, debug_panic, + ResultExt, debug_panic, maybe, paths::{PathMatcher, PathStyle, SanitizedPath, home_dir}, rel_path::RelPath, }; @@ -1070,7 +1070,7 @@ impl LocalWorktree { scan_requests_rx, path_prefixes_to_scan_rx, next_entry_id, - state: Mutex::new(BackgroundScannerState { + state: async_lock::Mutex::new(BackgroundScannerState { prev_snapshot: snapshot.snapshot.clone(), snapshot, scanned_dirs: Default::default(), @@ -3547,7 +3547,7 @@ impl<'a> sum_tree::Dimension<'a, EntrySummary> for PathKey { } struct BackgroundScanner { - state: Mutex, + state: async_lock::Mutex, fs: Arc, fs_case_sensitive: bool, status_updates_tx: UnboundedSender, @@ -3573,31 +3573,38 @@ impl BackgroundScanner { // If the worktree root does not contain a git repository, then find // the git repository in an ancestor directory. Find any gitignore files // in ancestor directories. 
- let root_abs_path = self.state.lock().snapshot.abs_path.clone(); + let root_abs_path = self.state.lock().await.snapshot.abs_path.clone(); let (ignores, repo) = discover_ancestor_git_repo(self.fs.clone(), &root_abs_path).await; self.state .lock() + .await .snapshot .ignores_by_parent_abs_path .extend(ignores); - let containing_git_repository = repo.and_then(|(ancestor_dot_git, work_directory)| { - self.state - .lock() - .insert_git_repository_for_path( - work_directory, - ancestor_dot_git.clone().into(), - self.fs.as_ref(), - self.watcher.as_ref(), - ) - .log_err()?; - Some(ancestor_dot_git) - }); + let containing_git_repository = if let Some((ancestor_dot_git, work_directory)) = repo { + maybe!(async { + self.state + .lock() + .await + .insert_git_repository_for_path( + work_directory, + ancestor_dot_git.clone().into(), + self.fs.as_ref(), + self.watcher.as_ref(), + ) + .log_err()?; + Some(ancestor_dot_git) + }) + .await + } else { + None + }; log::trace!("containing git repository: {containing_git_repository:?}"); let mut global_gitignore_events = if let Some(global_gitignore_path) = &paths::global_gitignore_path() { - self.state.lock().snapshot.global_gitignore = + self.state.lock().await.snapshot.global_gitignore = if self.fs.is_file(&global_gitignore_path).await { build_gitignore(global_gitignore_path, self.fs.as_ref()) .await @@ -3611,13 +3618,13 @@ impl BackgroundScanner { .await .0 } else { - self.state.lock().snapshot.global_gitignore = None; + self.state.lock().await.snapshot.global_gitignore = None; Box::pin(futures::stream::empty()) }; let (scan_job_tx, scan_job_rx) = channel::unbounded(); { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; state.snapshot.scan_id += 1; if let Some(mut root_entry) = state.snapshot.root_entry().cloned() { let ignore_stack = state.snapshot.ignore_stack_for_abs_path( @@ -3644,11 +3651,11 @@ impl BackgroundScanner { drop(scan_job_tx); self.scan_dirs(true, scan_job_rx).await; { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; state.snapshot.completed_scan_id = state.snapshot.scan_id; } - self.send_status_update(false, SmallVec::new()); + self.send_status_update(false, SmallVec::new()).await; // Process any any FS events that occurred while performing the initial scan. 
// For these events, update events cannot be as precise, because we didn't @@ -3693,7 +3700,7 @@ impl BackgroundScanner { if did_scan { let abs_path = { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; state.path_prefixes_to_scan.insert(request.path.clone()); state.snapshot.absolutize(&request.path) }; @@ -3702,7 +3709,7 @@ impl BackgroundScanner { self.process_events(vec![abs_path]).await; } } - self.send_status_update(false, request.done); + self.send_status_update(false, request.done).await; } paths = fs_events_rx.next().fuse() => { @@ -3731,7 +3738,7 @@ impl BackgroundScanner { request.relative_paths.sort_unstable(); self.forcibly_load_paths(&request.relative_paths).await; - let root_path = self.state.lock().snapshot.abs_path.clone(); + let root_path = self.state.lock().await.snapshot.abs_path.clone(); let root_canonical_path = self.fs.canonicalize(root_path.as_path()).await; let root_canonical_path = match &root_canonical_path { Ok(path) => SanitizedPath::new(path), @@ -3753,7 +3760,7 @@ impl BackgroundScanner { .collect::>(); { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; let is_idle = state.snapshot.completed_scan_id == state.snapshot.scan_id; state.snapshot.scan_id += 1; if is_idle { @@ -3770,12 +3777,12 @@ impl BackgroundScanner { ) .await; - self.send_status_update(scanning, request.done) + self.send_status_update(scanning, request.done).await } async fn process_events(&self, mut abs_paths: Vec) { log::trace!("process events: {abs_paths:?}"); - let root_path = self.state.lock().snapshot.abs_path.clone(); + let root_path = self.state.lock().await.snapshot.abs_path.clone(); let root_canonical_path = self.fs.canonicalize(root_path.as_path()).await; let root_canonical_path = match &root_canonical_path { Ok(path) => SanitizedPath::new(path), @@ -3783,6 +3790,7 @@ impl BackgroundScanner { let new_path = self .state .lock() + .await .snapshot .root_file_handle .clone() @@ -3815,10 +3823,12 @@ impl BackgroundScanner { let mut dot_git_abs_paths = Vec::new(); abs_paths.sort_unstable(); abs_paths.dedup_by(|a, b| a.starts_with(b)); - abs_paths.retain(|abs_path| { + { + let snapshot = &self.state.lock().await.snapshot; + abs_paths.retain(|abs_path| { let abs_path = &SanitizedPath::new(abs_path); - let snapshot = &self.state.lock().snapshot; + { let mut is_git_related = false; @@ -3905,12 +3915,12 @@ impl BackgroundScanner { true } }); - + } if relative_paths.is_empty() && dot_git_abs_paths.is_empty() { return; } - self.state.lock().snapshot.scan_id += 1; + self.state.lock().await.snapshot.scan_id += 1; let (scan_job_tx, scan_job_rx) = channel::unbounded(); log::debug!("received fs events {:?}", relative_paths); @@ -3924,29 +3934,29 @@ impl BackgroundScanner { .await; let affected_repo_roots = if !dot_git_abs_paths.is_empty() { - self.update_git_repositories(dot_git_abs_paths) + self.update_git_repositories(dot_git_abs_paths).await } else { Vec::new() }; { - let mut ignores_to_update = self.ignores_needing_update(); + let mut ignores_to_update = self.ignores_needing_update().await; ignores_to_update.extend(affected_repo_roots); - let ignores_to_update = self.order_ignores(ignores_to_update); - let snapshot = self.state.lock().snapshot.clone(); + let ignores_to_update = self.order_ignores(ignores_to_update).await; + let snapshot = self.state.lock().await.snapshot.clone(); self.update_ignore_statuses_for_paths(scan_job_tx, snapshot, ignores_to_update) .await; self.scan_dirs(false, scan_job_rx).await; } { - let mut state = 
self.state.lock(); + let mut state = self.state.lock().await; state.snapshot.completed_scan_id = state.snapshot.scan_id; for (_, entry) in mem::take(&mut state.removed_entries) { state.scanned_dirs.remove(&entry.id); } } - self.send_status_update(false, SmallVec::new()); + self.send_status_update(false, SmallVec::new()).await; } async fn update_global_gitignore(&self, abs_path: &Path) { @@ -3955,7 +3965,7 @@ impl BackgroundScanner { .log_err() .map(Arc::new); let (prev_snapshot, ignore_stack, abs_path) = { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; state.snapshot.global_gitignore = ignore; let abs_path = state.snapshot.abs_path().clone(); let ignore_stack = @@ -3972,13 +3982,13 @@ impl BackgroundScanner { ) .await; self.scan_dirs(false, scan_job_rx).await; - self.send_status_update(false, SmallVec::new()); + self.send_status_update(false, SmallVec::new()).await; } async fn forcibly_load_paths(&self, paths: &[Arc]) -> bool { let (scan_job_tx, scan_job_rx) = channel::unbounded(); { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; let root_path = state.snapshot.abs_path.clone(); for path in paths { for ancestor in path.ancestors() { @@ -4003,7 +4013,7 @@ impl BackgroundScanner { self.scan_dir(&job).await.log_err(); } - !mem::take(&mut self.state.lock().paths_to_scan).is_empty() + !mem::take(&mut self.state.lock().await.paths_to_scan).is_empty() } async fn scan_dirs( @@ -4051,7 +4061,7 @@ impl BackgroundScanner { ) { Ok(_) => { last_progress_update_count += 1; - self.send_status_update(true, SmallVec::new()); + self.send_status_update(true, SmallVec::new()).await; } Err(count) => { last_progress_update_count = count; @@ -4076,8 +4086,12 @@ impl BackgroundScanner { .await; } - fn send_status_update(&self, scanning: bool, barrier: SmallVec<[barrier::Sender; 1]>) -> bool { - let mut state = self.state.lock(); + async fn send_status_update( + &self, + scanning: bool, + barrier: SmallVec<[barrier::Sender; 1]>, + ) -> bool { + let mut state = self.state.lock().await; if state.changed_paths.is_empty() && scanning { return true; } @@ -4106,7 +4120,7 @@ impl BackgroundScanner { let root_abs_path; let root_char_bag; { - let snapshot = &self.state.lock().snapshot; + let snapshot = &self.state.lock().await.snapshot; if self.settings.is_path_excluded(&job.path) { log::error!("skipping excluded directory {:?}", job.path); return Ok(()); @@ -4159,7 +4173,7 @@ impl BackgroundScanner { }; if child_name == DOT_GIT { - let mut state = self.state.lock(); + let mut state = self.state.lock().await; state.insert_git_repository( child_path.clone(), self.fs.as_ref(), @@ -4184,7 +4198,7 @@ impl BackgroundScanner { if self.settings.is_path_excluded(&child_path) { log::debug!("skipping excluded child entry {child_path:?}"); - self.state.lock().remove_path(&child_path); + self.state.lock().await.remove_path(&child_path); continue; } @@ -4284,7 +4298,7 @@ impl BackgroundScanner { new_entries.push(child_entry); } - let mut state = self.state.lock(); + let mut state = self.state.lock().await; // Identify any subdirectories that should not be scanned. 
let mut job_ix = 0; @@ -4366,7 +4380,7 @@ impl BackgroundScanner { None }; - let mut state = self.state.lock(); + let mut state = self.state.lock().await; let doing_recursive_update = scan_queue_tx.is_some(); // Remove any entries for paths that no longer exist or are being recursively @@ -4527,11 +4541,11 @@ impl BackgroundScanner { .await; } - fn ignores_needing_update(&self) -> Vec> { + async fn ignores_needing_update(&self) -> Vec> { let mut ignores_to_update = Vec::new(); { - let snapshot = &mut self.state.lock().snapshot; + let snapshot = &mut self.state.lock().await.snapshot; let abs_path = snapshot.abs_path.clone(); snapshot .ignores_by_parent_abs_path @@ -4559,12 +4573,12 @@ impl BackgroundScanner { ignores_to_update } - fn order_ignores( + async fn order_ignores( &self, mut ignores: Vec>, ) -> impl use<> + Iterator, IgnoreStack)> { let fs = self.fs.clone(); - let snapshot = self.state.lock().snapshot.clone(); + let snapshot = self.state.lock().await.snapshot.clone(); ignores.sort_unstable(); let mut ignores_to_update = ignores.into_iter().peekable(); std::iter::from_fn(move || { @@ -4632,7 +4646,7 @@ impl BackgroundScanner { // Scan any directories that were previously ignored and weren't previously scanned. if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() { - let state = self.state.lock(); + let state = self.state.lock().await; if state.should_scan_directory(&entry) { state.enqueue_scan_dir( abs_path.clone(), @@ -4663,7 +4677,7 @@ impl BackgroundScanner { } } - let state = &mut self.state.lock(); + let state = &mut self.state.lock().await; for edit in &entries_by_path_edits { if let Edit::Insert(entry) = edit && let Err(ix) = state.changed_paths.binary_search(&entry.path) @@ -4679,9 +4693,9 @@ impl BackgroundScanner { state.snapshot.entries_by_id.edit(entries_by_id_edits, ()); } - fn update_git_repositories(&self, dot_git_paths: Vec) -> Vec> { + async fn update_git_repositories(&self, dot_git_paths: Vec) -> Vec> { log::trace!("reloading repositories: {dot_git_paths:?}"); - let mut state = self.state.lock(); + let mut state = self.state.lock().await; let scan_id = state.snapshot.scan_id; let mut affected_repo_roots = Vec::new(); for dot_git_dir in dot_git_paths { @@ -5526,9 +5540,11 @@ fn discover_git_paths( if let Some(path) = executor.block(fs.canonicalize(&path)).log_err() { repository_dir_abs_path = Path::new(&path).into(); common_dir_abs_path = repository_dir_abs_path.clone(); - if let Some(commondir_contents) = smol::block_on(fs.load(&path.join("commondir"))).ok() - && let Some(commondir_path) = - smol::block_on(fs.canonicalize(&path.join(commondir_contents.trim()))).log_err() + + if let Some(commondir_contents) = executor.block(fs.load(&path.join("commondir"))).ok() + && let Some(commondir_path) = executor + .block(fs.canonicalize(&path.join(commondir_contents.trim()))) + .log_err() { common_dir_abs_path = commondir_path.as_path().into(); } From edaf77e10b8b3c8c7559931dbfa0f2aae4f7ad4b Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 00:01:21 +0200 Subject: [PATCH 08/22] Make worktree locks asynchronous --- crates/fs/src/fs.rs | 1 - crates/worktree/src/worktree.rs | 228 ++++++++++++++++++-------------- 2 files changed, 128 insertions(+), 101 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 28e224d35da8e2..a4c7115427f417 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -11,7 +11,6 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use 
gpui::ReadGlobal as _; -use gpui::Task; use std::borrow::Cow; use util::command::new_smol_command; diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 1e3404c9096aa0..d3aa911ae27c97 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -2494,7 +2494,12 @@ impl LocalSnapshot { inodes } - fn ignore_stack_for_abs_path(&self, abs_path: &Path, is_dir: bool, fs: &dyn Fs) -> IgnoreStack { + async fn ignore_stack_for_abs_path( + &self, + abs_path: &Path, + is_dir: bool, + fs: &dyn Fs, + ) -> IgnoreStack { let mut new_ignores = Vec::new(); let mut repo_root = None; for (index, ancestor) in abs_path.ancestors().enumerate() { @@ -2506,11 +2511,7 @@ impl LocalSnapshot { } } - let metadata = self - .executor - .block(fs.metadata(&ancestor.join(DOT_GIT))) - .ok() - .flatten(); + let metadata = fs.metadata(&ancestor.join(DOT_GIT)).await.ok().flatten(); if metadata.is_some() { repo_root = Some(Arc::from(ancestor)); break; @@ -2656,7 +2657,7 @@ impl BackgroundScannerState { .any(|p| entry.path.starts_with(p)) } - fn enqueue_scan_dir( + async fn enqueue_scan_dir( &self, abs_path: Arc, entry: &Entry, @@ -2664,7 +2665,10 @@ impl BackgroundScannerState { fs: &dyn Fs, ) { let path = entry.path.clone(); - let ignore_stack = self.snapshot.ignore_stack_for_abs_path(&abs_path, true, fs); + let ignore_stack = self + .snapshot + .ignore_stack_for_abs_path(&abs_path, true, fs) + .await; let mut ancestor_inodes = self.snapshot.ancestor_inodes_for_path(&path); if !ancestor_inodes.contains(&entry.inode) { @@ -2702,11 +2706,17 @@ impl BackgroundScannerState { } } - fn insert_entry(&mut self, mut entry: Entry, fs: &dyn Fs, watcher: &dyn Watcher) -> Entry { + async fn insert_entry( + &mut self, + mut entry: Entry, + fs: &dyn Fs, + watcher: &dyn Watcher, + ) -> Entry { self.reuse_entry_id(&mut entry); let entry = self.snapshot.insert_entry(entry, fs); if entry.path.file_name() == Some(&DOT_GIT) { - self.insert_git_repository(entry.path.clone(), fs, watcher); + self.insert_git_repository(entry.path.clone(), fs, watcher) + .await; } #[cfg(test)] @@ -2837,7 +2847,7 @@ impl BackgroundScannerState { self.snapshot.check_invariants(false); } - fn insert_git_repository( + async fn insert_git_repository( &mut self, dot_git_path: Arc, fs: &dyn Fs, @@ -2878,10 +2888,11 @@ impl BackgroundScannerState { fs, watcher, ) + .await .log_err(); } - fn insert_git_repository_for_path( + async fn insert_git_repository_for_path( &mut self, work_directory: WorkDirectory, dot_git_abs_path: Arc, @@ -2903,7 +2914,7 @@ impl BackgroundScannerState { let work_directory_abs_path = self.snapshot.work_directory_abs_path(&work_directory); let (repository_dir_abs_path, common_dir_abs_path) = - discover_git_paths(&dot_git_abs_path, fs, &self.snapshot.executor); + discover_git_paths(&dot_git_abs_path, fs).await; watcher .add(&common_dir_abs_path) .context("failed to add common directory to watcher") @@ -3592,6 +3603,7 @@ impl BackgroundScanner { self.fs.as_ref(), self.watcher.as_ref(), ) + .await .log_err()?; Some(ancestor_dot_git) }) @@ -3627,22 +3639,25 @@ impl BackgroundScanner { let mut state = self.state.lock().await; state.snapshot.scan_id += 1; if let Some(mut root_entry) = state.snapshot.root_entry().cloned() { - let ignore_stack = state.snapshot.ignore_stack_for_abs_path( - root_abs_path.as_path(), - true, - self.fs.as_ref(), - ); + let ignore_stack = state + .snapshot + .ignore_stack_for_abs_path(root_abs_path.as_path(), true, self.fs.as_ref()) + .await; if 
ignore_stack.is_abs_path_ignored(root_abs_path.as_path(), true) { root_entry.is_ignored = true; - state.insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref()); + state + .insert_entry(root_entry.clone(), self.fs.as_ref(), self.watcher.as_ref()) + .await; } if root_entry.is_dir() { - state.enqueue_scan_dir( - root_abs_path.as_path().into(), - &root_entry, - &scan_job_tx, - self.fs.as_ref(), - ); + state + .enqueue_scan_dir( + root_abs_path.as_path().into(), + &root_entry, + &scan_job_tx, + self.fs.as_ref(), + ) + .await; } } }; @@ -3832,17 +3847,22 @@ impl BackgroundScanner { { let mut is_git_related = false; - let dot_git_paths = abs_path.as_path().ancestors().find_map(|ancestor| { - if snapshot.executor.block(is_git_dir(ancestor, self.fs.as_ref())) { + let dot_git_paths = self.executor.block(maybe!(async { + let mut path = None; + for ancestor in abs_path.as_path().ancestors() { + + if is_git_dir(ancestor, self.fs.as_ref()).await { let path_in_git_dir = abs_path .as_path() .strip_prefix(ancestor) .expect("stripping off the ancestor"); - Some((ancestor.to_owned(), path_in_git_dir.to_owned())) - } else { - None + path = Some((ancestor.to_owned(), path_in_git_dir.to_owned())); + break; } - }); + } + path + + })); if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths { if skipped_files_in_dot_git @@ -3968,17 +3988,17 @@ impl BackgroundScanner { let mut state = self.state.lock().await; state.snapshot.global_gitignore = ignore; let abs_path = state.snapshot.abs_path().clone(); - let ignore_stack = - state - .snapshot - .ignore_stack_for_abs_path(&abs_path, true, self.fs.as_ref()); + let ignore_stack = state + .snapshot + .ignore_stack_for_abs_path(&abs_path, true, self.fs.as_ref()) + .await; (state.snapshot.clone(), ignore_stack, abs_path) }; let (scan_job_tx, scan_job_rx) = channel::unbounded(); self.update_ignore_statuses_for_paths( scan_job_tx, prev_snapshot, - vec![(abs_path, ignore_stack)].into_iter(), + vec![(abs_path, ignore_stack)], ) .await; self.scan_dirs(false, scan_job_rx).await; @@ -3996,12 +4016,14 @@ impl BackgroundScanner { && entry.kind == EntryKind::UnloadedDir { let abs_path = root_path.join(ancestor.as_std_path()); - state.enqueue_scan_dir( - abs_path.into(), - entry, - &scan_job_tx, - self.fs.as_ref(), - ); + state + .enqueue_scan_dir( + abs_path.into(), + entry, + &scan_job_tx, + self.fs.as_ref(), + ) + .await; state.paths_to_scan.insert(path.clone()); break; } @@ -4174,11 +4196,13 @@ impl BackgroundScanner { if child_name == DOT_GIT { let mut state = self.state.lock().await; - state.insert_git_repository( - child_path.clone(), - self.fs.as_ref(), - self.watcher.as_ref(), - ); + state + .insert_git_repository( + child_path.clone(), + self.fs.as_ref(), + self.watcher.as_ref(), + ) + .await; } else if child_name == GITIGNORE { match build_gitignore(&child_abs_path, self.fs.as_ref()).await { Ok(ignore) => { @@ -4396,11 +4420,10 @@ impl BackgroundScanner { let abs_path: Arc = root_abs_path.join(path.as_std_path()).into(); match metadata { Ok(Some((metadata, canonical_path))) => { - let ignore_stack = state.snapshot.ignore_stack_for_abs_path( - &abs_path, - metadata.is_dir, - self.fs.as_ref(), - ); + let ignore_stack = state + .snapshot + .ignore_stack_for_abs_path(&abs_path, metadata.is_dir, self.fs.as_ref()) + .await; let is_external = !canonical_path.starts_with(&root_canonical_path); let mut fs_entry = Entry::new( path.clone(), @@ -4432,18 +4455,22 @@ impl BackgroundScanner { || (fs_entry.path.is_empty() && abs_path.file_name() == 
Some(OsStr::new(DOT_GIT))) { - state.enqueue_scan_dir( - abs_path, - &fs_entry, - scan_queue_tx, - self.fs.as_ref(), - ); + state + .enqueue_scan_dir( + abs_path, + &fs_entry, + scan_queue_tx, + self.fs.as_ref(), + ) + .await; } else { fs_entry.kind = EntryKind::UnloadedDir; } } - state.insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref()); + state + .insert_entry(fs_entry.clone(), self.fs.as_ref(), self.watcher.as_ref()) + .await; if path.is_empty() && let Some((ignores, repo)) = new_ancestor_repo.take() @@ -4458,6 +4485,7 @@ impl BackgroundScanner { self.fs.as_ref(), self.watcher.as_ref(), ) + .await .log_err(); } } @@ -4496,11 +4524,11 @@ impl BackgroundScanner { &self, scan_job_tx: Sender, prev_snapshot: LocalSnapshot, - mut ignores_to_update: impl Iterator, IgnoreStack)>, + ignores_to_update: Vec<(Arc, IgnoreStack)>, ) { let (ignore_queue_tx, ignore_queue_rx) = channel::unbounded(); { - while let Some((parent_abs_path, ignore_stack)) = ignores_to_update.next() { + for (parent_abs_path, ignore_stack) in ignores_to_update { ignore_queue_tx .send_blocking(UpdateIgnoreStatusJob { abs_path: parent_abs_path, @@ -4573,26 +4601,27 @@ impl BackgroundScanner { ignores_to_update } - async fn order_ignores( - &self, - mut ignores: Vec>, - ) -> impl use<> + Iterator, IgnoreStack)> { + async fn order_ignores(&self, mut ignores: Vec>) -> Vec<(Arc, IgnoreStack)> { let fs = self.fs.clone(); let snapshot = self.state.lock().await.snapshot.clone(); ignores.sort_unstable(); let mut ignores_to_update = ignores.into_iter().peekable(); - std::iter::from_fn(move || { - let parent_abs_path = ignores_to_update.next()?; + + let mut result = vec![]; + while let Some(parent_abs_path) = ignores_to_update.next() { while ignores_to_update .peek() .map_or(false, |p| p.starts_with(&parent_abs_path)) { ignores_to_update.next().unwrap(); } - let ignore_stack = - snapshot.ignore_stack_for_abs_path(&parent_abs_path, true, fs.as_ref()); - Some((parent_abs_path, ignore_stack)) - }) + let ignore_stack = snapshot + .ignore_stack_for_abs_path(&parent_abs_path, true, fs.as_ref()) + .await; + result.push((parent_abs_path, ignore_stack)); + } + + result } async fn update_ignore_status(&self, job: UpdateIgnoreStatusJob, snapshot: &LocalSnapshot) { @@ -4624,9 +4653,7 @@ impl BackgroundScanner { return; }; - if let Ok(Some(metadata)) = self - .executor - .block(self.fs.metadata(&job.abs_path.join(DOT_GIT))) + if let Ok(Some(metadata)) = self.fs.metadata(&job.abs_path.join(DOT_GIT)).await && metadata.is_dir { ignore_stack.repo_root = Some(job.abs_path.clone()); @@ -4648,12 +4675,14 @@ impl BackgroundScanner { if was_ignored && !entry.is_ignored && entry.kind.is_unloaded() { let state = self.state.lock().await; if state.should_scan_directory(&entry) { - state.enqueue_scan_dir( - abs_path.clone(), - &entry, - &job.scan_queue, - self.fs.as_ref(), - ); + state + .enqueue_scan_dir( + abs_path.clone(), + &entry, + &job.scan_queue, + self.fs.as_ref(), + ) + .await; } } @@ -4725,13 +4754,15 @@ impl BackgroundScanner { return Vec::new(); }; affected_repo_roots.push(dot_git_dir.parent().unwrap().into()); - state.insert_git_repository( - RelPath::new(relative, PathStyle::local()) - .unwrap() - .into_arc(), - self.fs.as_ref(), - self.watcher.as_ref(), - ); + state + .insert_git_repository( + RelPath::new(relative, PathStyle::local()) + .unwrap() + .into_arc(), + self.fs.as_ref(), + self.watcher.as_ref(), + ) + .await; } Some(local_repository) => { state.snapshot.git_repositories.update( @@ -4759,8 +4790,7 @@ impl 
BackgroundScanner { if exists_in_snapshot || matches!( - self.executor - .block(self.fs.metadata(&entry.common_dir_abs_path)), + self.fs.metadata(&entry.common_dir_abs_path).await, Ok(Some(_)) ) { @@ -5519,16 +5549,13 @@ fn parse_gitfile(content: &str) -> anyhow::Result<&Path> { Ok(Path::new(path.trim())) } -fn discover_git_paths( - dot_git_abs_path: &Arc, - fs: &dyn Fs, - executor: &BackgroundExecutor, -) -> (Arc, Arc) { +async fn discover_git_paths(dot_git_abs_path: &Arc, fs: &dyn Fs) -> (Arc, Arc) { let mut repository_dir_abs_path = dot_git_abs_path.clone(); let mut common_dir_abs_path = dot_git_abs_path.clone(); - if let Some(path) = executor - .block(fs.load(dot_git_abs_path)) + if let Some(path) = fs + .load(dot_git_abs_path) + .await .ok() .as_ref() .and_then(|contents| parse_gitfile(contents).log_err()) @@ -5537,13 +5564,14 @@ fn discover_git_paths( .parent() .unwrap_or(Path::new("")) .join(path); - if let Some(path) = executor.block(fs.canonicalize(&path)).log_err() { + if let Some(path) = fs.canonicalize(&path).await.log_err() { repository_dir_abs_path = Path::new(&path).into(); common_dir_abs_path = repository_dir_abs_path.clone(); - if let Some(commondir_contents) = executor.block(fs.load(&path.join("commondir"))).ok() - && let Some(commondir_path) = executor - .block(fs.canonicalize(&path.join(commondir_contents.trim()))) + if let Some(commondir_contents) = fs.load(&path.join("commondir")).await.ok() + && let Some(commondir_path) = fs + .canonicalize(&path.join(commondir_contents.trim())) + .await .log_err() { common_dir_abs_path = commondir_path.as_path().into(); From 75fb172f062cd9aa24596274646466bb9e7d2bc2 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 00:09:24 +0200 Subject: [PATCH 09/22] A bunch of extra methods on fs --- crates/fs/src/fs.rs | 59 +++++++++++++++++++++++++++++++-------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index a4c7115427f417..7c2c32b31f8b6b 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -640,30 +640,47 @@ impl Fs for RealFs { if let Some(path) = path.parent() { self.create_dir(path).await?; } - smol::fs::write(path, content).await?; - Ok(()) + let path = path.to_owned(); + let contents = content.to_owned(); + self.executor + .spawn(async move { + std::fs::write(path, contents)?; + Ok(()) + }) + .await } async fn canonicalize(&self, path: &Path) -> Result { - Ok(smol::fs::canonicalize(path) + let path = path.to_owned(); + self.executor + .spawn(async move { + Ok(std::fs::canonicalize(&path) + .with_context(|| format!("canonicalizing {path:?}"))?) + }) .await - .with_context(|| format!("canonicalizing {path:?}"))?) 
} async fn is_file(&self, path: &Path) -> bool { - smol::fs::metadata(path) + let path = path.to_owned(); + self.executor + .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_file()) }) .await - .is_ok_and(|metadata| metadata.is_file()) } async fn is_dir(&self, path: &Path) -> bool { - smol::fs::metadata(path) + let path = path.to_owned(); + self.executor + .spawn(async move { std::fs::metadata(path).is_ok_and(|metadata| metadata.is_dir()) }) .await - .is_ok_and(|metadata| metadata.is_dir()) } async fn metadata(&self, path: &Path) -> Result> { - let symlink_metadata = match smol::fs::symlink_metadata(path).await { + let path_buf = path.to_owned(); + let symlink_metadata = match self + .executor + .spawn(async move { std::fs::symlink_metadata(&path_buf) }) + .await + { Ok(metadata) => metadata, Err(err) => { return match (err.kind(), err.raw_os_error()) { @@ -675,17 +692,23 @@ impl Fs for RealFs { }; let path_buf = path.to_path_buf(); - let path_exists = smol::unblock(move || { - path_buf - .try_exists() - .with_context(|| format!("checking existence for path {path_buf:?}")) - }) - .await?; + let path_exists = self + .executor + .spawn(async move { + path_buf + .try_exists() + .with_context(|| format!("checking existence for path {path_buf:?}")) + }) + .await?; let is_symlink = symlink_metadata.file_type().is_symlink(); let metadata = match (is_symlink, path_exists) { - (true, true) => smol::fs::metadata(path) - .await - .with_context(|| "accessing symlink for path {path}")?, + (true, true) => { + let path_buf = path.to_path_buf(); + self.executor + .spawn(async move { std::fs::metadata(path_buf) }) + .await + .with_context(|| "accessing symlink for path {path}")? + } _ => symlink_metadata, }; From e2f53f33f263bb0ed5b3eb2d9cf148e4891e61a7 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:52:59 +0200 Subject: [PATCH 10/22] Fix clippy Co-authored-by: Smit Barmase --- crates/fs/src/fs.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 7c2c32b31f8b6b..42d80171f7a617 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -654,8 +654,7 @@ impl Fs for RealFs { let path = path.to_owned(); self.executor .spawn(async move { - Ok(std::fs::canonicalize(&path) - .with_context(|| format!("canonicalizing {path:?}"))?) + std::fs::canonicalize(&path).with_context(|| format!("canonicalizing {path:?}")) }) .await } From 025dd5ef7b6726ac7fc5b4e75f850d2bd508aa0e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 12:17:25 +0200 Subject: [PATCH 11/22] BG-fy read_dir.. at least partially. 
--- crates/fs/src/fs.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 42d80171f7a617..9fc8cc1a34947d 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -7,6 +7,7 @@ pub mod fs_watcher; use anyhow::{Context as _, Result, anyhow}; #[cfg(any(target_os = "linux", target_os = "freebsd"))] use ashpd::desktop::trash; +use futures::stream::iter; use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; @@ -742,7 +743,13 @@ impl Fs for RealFs { &self, path: &Path, ) -> Result>>>> { - let result = smol::fs::read_dir(path).await?.map(|entry| match entry { + let path = path.to_owned(); + let result = iter( + self.executor + .spawn(async move { std::fs::read_dir(path) }) + .await?, + ) + .map(|entry| match entry { Ok(entry) => Ok(entry.path()), Err(error) => Err(anyhow!("failed to read dir entry {error:?}")), }); From 32226d01df992d2f0b2f85e6c6f5b82eeca3243e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 12:22:30 +0200 Subject: [PATCH 12/22] fix 'test_strip_whitespace_and_format_via_lsp' --- crates/editor/src/editor_tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index dab89557cf137b..7d76e4d16262ab 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -12509,6 +12509,7 @@ async fn test_strip_whitespace_and_format_via_lsp(cx: &mut TestAppContext) { ) .await; + cx.run_until_parked(); // Set up a buffer white some trailing whitespace and no trailing newline. cx.set_state( &[ From a1975dc00d3df82b6387636c5225bc4e8bc715c0 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 13:07:33 +0200 Subject: [PATCH 13/22] read_link --- crates/fs/src/fs.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 9fc8cc1a34947d..2094289e2b4d56 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -735,7 +735,11 @@ impl Fs for RealFs { } async fn read_link(&self, path: &Path) -> Result { - let path = smol::fs::read_link(path).await?; + let path = path.to_owned(); + let path = self + .executor + .spawn(async move { std::fs::read_link(&path) }) + .await?; Ok(path) } From 96c57df97021be021af98fd4cd38fc53a32fa3ac Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 20:25:19 +0200 Subject: [PATCH 14/22] Minor opt --- crates/fs/src/fs.rs | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 2094289e2b4d56..9907d0dcbde489 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -691,25 +691,28 @@ impl Fs for RealFs { } }; - let path_buf = path.to_path_buf(); - let path_exists = self - .executor - .spawn(async move { - path_buf - .try_exists() - .with_context(|| format!("checking existence for path {path_buf:?}")) - }) - .await?; let is_symlink = symlink_metadata.file_type().is_symlink(); - let metadata = match (is_symlink, path_exists) { - (true, true) => { + let metadata = if is_symlink { + let path_buf = path.to_path_buf(); + let path_exists = self + .executor + .spawn(async move { + path_buf + .try_exists() + .with_context(|| format!("checking existence for path {path_buf:?}")) + }) + .await?; + if path_exists { let path_buf = path.to_path_buf(); self.executor 
.spawn(async move { std::fs::metadata(path_buf) }) .await .with_context(|| "accessing symlink for path {path}")? + } else { + symlink_metadata } - _ => symlink_metadata, + } else { + symlink_metadata }; #[cfg(unix)] From 23d84e26c692fab1670bf54f1633aa783295751b Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 21:31:31 +0200 Subject: [PATCH 15/22] Experiment: Bundle metadata with directory info --- crates/fs/src/fs.rs | 108 +++++++++++++++++++++-------- crates/prompt_store/src/prompts.rs | 1 + crates/theme/src/registry.rs | 2 +- crates/worktree/src/worktree.rs | 20 +++--- 4 files changed, 94 insertions(+), 37 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 9907d0dcbde489..faacc4317d6489 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -12,6 +12,7 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use gpui::ReadGlobal as _; +use smol::stream::StreamExt; use std::borrow::Cow; use util::command::new_smol_command; @@ -25,7 +26,7 @@ use std::os::unix::fs::{FileTypeExt, MetadataExt}; use std::mem::MaybeUninit; use async_tar::Archive; -use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture}; +use futures::{AsyncRead, Stream, future::BoxFuture}; use git::repository::{GitRepository, RealGitRepository}; use rope::Rope; use serde::{Deserialize, Serialize}; @@ -124,7 +125,7 @@ pub trait Fs: Send + Sync { async fn read_dir( &self, path: &Path, - ) -> Result>>>>; + ) -> Result>>>>; async fn watch( &self, @@ -152,6 +153,49 @@ pub trait Fs: Send + Sync { } } +enum DirEntryPayload { + Entry(Option), + Metadata(Result), +} +pub struct DirEntry { + path: Arc, + payload: DirEntryPayload, + executor: BackgroundExecutor, +} + +impl DirEntry { + fn new(entry: std::fs::DirEntry, executor: BackgroundExecutor) -> Self { + let path = entry.path().into(); + + Self { + path, + payload: DirEntryPayload::Entry(Some(entry)), + executor, + } + } + pub async fn metadata(&mut self) -> &Result { + if let DirEntryPayload::Entry(entry) = &mut self.payload { + let dir_entry = entry.take().unwrap(); + let meta = self + .executor + .spawn(async move { dir_entry.metadata() }) + .await; + let is_symlink = meta.as_ref().ok().is_some_and(|meta| meta.is_symlink()); + let payload = match meta { + Ok(meta) => RealFs::std_meta_to_zed_metadata(&self.path, meta, is_symlink).await, + Err(e) => Err(e.into()), + }; + self.payload = DirEntryPayload::Metadata(payload); + } + let DirEntryPayload::Metadata(meta) = &self.payload else { + unreachable!(); + }; + meta + } + pub fn path(&self) -> &Arc { + &self.path + } +} struct GlobalFs(Arc); impl Global for GlobalFs {} @@ -334,6 +378,32 @@ impl RealFs { executor, } } + async fn std_meta_to_zed_metadata( + _path: &Path, + metadata: std::fs::Metadata, + is_symlink: bool, + ) -> Result { + #[cfg(unix)] + let inode = metadata.ino(); + + #[cfg(windows)] + let inode = file_id(_path).await?; + + #[cfg(windows)] + let is_fifo = false; + + #[cfg(unix)] + let is_fifo = metadata.file_type().is_fifo(); + + Ok(Metadata { + inode, + mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), + len: metadata.len(), + is_symlink, + is_dir: metadata.file_type().is_dir(), + is_fifo, + }) + } } #[async_trait::async_trait] @@ -714,27 +784,9 @@ impl Fs for RealFs { } else { symlink_metadata }; - - #[cfg(unix)] - let inode = metadata.ino(); - - #[cfg(windows)] - let inode = file_id(path).await?; - - #[cfg(windows)] - let is_fifo = false; - - #[cfg(unix)] - let is_fifo = 
metadata.file_type().is_fifo(); - - Ok(Some(Metadata { - inode, - mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), - len: metadata.len(), - is_symlink, - is_dir: metadata.file_type().is_dir(), - is_fifo, - })) + Self::std_meta_to_zed_metadata(path, metadata, is_symlink) + .await + .map(Some) } async fn read_link(&self, path: &Path) -> Result { @@ -749,15 +801,17 @@ impl Fs for RealFs { async fn read_dir( &self, path: &Path, - ) -> Result>>>> { + ) -> Result>>>> { let path = path.to_owned(); + + let executor = self.executor.clone(); let result = iter( self.executor .spawn(async move { std::fs::read_dir(path) }) .await?, ) - .map(|entry| match entry { - Ok(entry) => Ok(entry.path()), + .map(move |entry| match entry { + Ok(entry) => Ok(DirEntry::new(entry, executor.clone())), Err(error) => Err(anyhow!("failed to read dir entry {error:?}")), }); Ok(Box::pin(result)) @@ -2650,7 +2704,7 @@ fn read_recursive<'a>( let mut children = fs.read_dir(source).await?; while let Some(child_path) = children.next().await { if let Ok(child_path) = child_path { - read_recursive(fs, &child_path, output).await?; + read_recursive(fs, &child_path.path(), output).await?; } } } else { diff --git a/crates/prompt_store/src/prompts.rs b/crates/prompt_store/src/prompts.rs index e6a9144a23a7bb..f54ea5c4e83667 100644 --- a/crates/prompt_store/src/prompts.rs +++ b/crates/prompt_store/src/prompts.rs @@ -264,6 +264,7 @@ impl PromptBuilder { // Initial scan of the prompt overrides directory if let Ok(mut entries) = params.fs.read_dir(&templates_dir).await { while let Some(Ok(file_path)) = entries.next().await { + let file_path = file_path.path(); if file_path.to_string_lossy().ends_with(".hbs") && let Ok(content) = params.fs.load(&file_path).await { let file_name = file_path.file_stem().unwrap().to_string_lossy(); diff --git a/crates/theme/src/registry.rs b/crates/theme/src/registry.rs index c362b62704257f..89818391a562ee 100644 --- a/crates/theme/src/registry.rs +++ b/crates/theme/src/registry.rs @@ -220,7 +220,7 @@ impl ThemeRegistry { continue; }; - self.load_user_theme(&theme_path, fs.clone()) + self.load_user_theme(&theme_path.path(), fs.clone()) .await .log_err(); } diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index d3aa911ae27c97..c64be98a38651c 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -3,7 +3,10 @@ mod worktree_settings; #[cfg(test)] mod worktree_tests; -use ::ignore::gitignore::{Gitignore, GitignoreBuilder}; +use ::ignore::{ + DirEntry, + gitignore::{Gitignore, GitignoreBuilder}, +}; use anyhow::{Context as _, Result, anyhow}; use clock::ReplicaId; use collections::{HashMap, HashSet, VecDeque}; @@ -4179,13 +4182,13 @@ impl BackgroundScanner { swap_to_front(&mut child_paths, DOT_GIT); if let Some(path) = child_paths.first() - && path.ends_with(DOT_GIT) + && path.path().ends_with(DOT_GIT) { ignore_stack.repo_root = Some(job.abs_path.clone()); } - for child_abs_path in child_paths { - let child_abs_path: Arc = child_abs_path.into(); + for mut child in child_paths { + let child_abs_path: Arc = child.path().clone(); let child_name = child_abs_path.file_name().unwrap(); let Some(child_path) = child_name .to_str() @@ -4226,9 +4229,8 @@ impl BackgroundScanner { continue; } - let child_metadata = match self.fs.metadata(&child_abs_path).await { - Ok(Some(metadata)) => metadata, - Ok(None) => continue, + let child_metadata = match child.metadata().await { + Ok(metadata) => metadata, Err(err) => { log::error!("error processing 
{child_abs_path:?}: {err:?}"); continue; @@ -5005,10 +5007,10 @@ fn build_diff( changes.into() } -fn swap_to_front(child_paths: &mut Vec, file: &str) { +fn swap_to_front(child_paths: &mut Vec, file: &str) { let position = child_paths .iter() - .position(|path| path.file_name().unwrap() == file); + .position(|path| path.path().file_name().unwrap() == file); if let Some(position) = position { let temp = child_paths.remove(position); child_paths.insert(0, temp); From 64612405f93aea64f0c8712b5b86e817afa1dca2 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 14 Oct 2025 21:33:03 +0200 Subject: [PATCH 16/22] Speculative: get rid of smol::unblock from windows' file_id impl --- crates/fs/src/fs.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index faacc4317d6489..c1c3ef4a5e43cd 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -2719,7 +2719,7 @@ fn read_recursive<'a>( // can we get file id not open the file twice? // https://github.com/rust-lang/rust/issues/63010 #[cfg(target_os = "windows")] -async fn file_id(path: impl AsRef) -> Result { +async fn file_id(executor: &BackgroundExecutor, path: impl AsRef) -> Result { use std::os::windows::io::AsRawHandle; use smol::fs::windows::OpenOptionsExt; @@ -2739,12 +2739,13 @@ async fn file_id(path: impl AsRef) -> Result { let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() }; // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle // This function supports Windows XP+ - smol::unblock(move || { - unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? }; + executor + .spawn(async move { + unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? 
}; - Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64)) - }) - .await + Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64)) + }) + .await } #[cfg(target_os = "windows")] From 1cfe4ec2e653de36fc7ec674c3861b2c3fa3dbfd Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 11:09:46 +0200 Subject: [PATCH 17/22] Fix Windows/Linux build --- crates/fs/src/fs.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index c1c3ef4a5e43cd..21f081e23c89e5 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -12,7 +12,6 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use gpui::ReadGlobal as _; -use smol::stream::StreamExt; use std::borrow::Cow; use util::command::new_smol_command; @@ -182,7 +181,10 @@ impl DirEntry { .await; let is_symlink = meta.as_ref().ok().is_some_and(|meta| meta.is_symlink()); let payload = match meta { - Ok(meta) => RealFs::std_meta_to_zed_metadata(&self.path, meta, is_symlink).await, + Ok(meta) => { + RealFs::std_meta_to_zed_metadata(&self.executor, &self.path, meta, is_symlink) + .await + } Err(e) => Err(e.into()), }; self.payload = DirEntryPayload::Metadata(payload); @@ -379,6 +381,7 @@ impl RealFs { } } async fn std_meta_to_zed_metadata( + _executor: &BackgroundExecutor, _path: &Path, metadata: std::fs::Metadata, is_symlink: bool, @@ -387,7 +390,7 @@ impl RealFs { let inode = metadata.ino(); #[cfg(windows)] - let inode = file_id(_path).await?; + let inode = file_id(_executor, _path).await?; #[cfg(windows)] let is_fifo = false; @@ -784,7 +787,7 @@ impl Fs for RealFs { } else { symlink_metadata }; - Self::std_meta_to_zed_metadata(path, metadata, is_symlink) + Self::std_meta_to_zed_metadata(&self.executor, path, metadata, is_symlink) .await .map(Some) } @@ -802,6 +805,7 @@ impl Fs for RealFs { &self, path: &Path, ) -> Result>>>> { + use smol::stream::StreamExt; let path = path.to_owned(); let executor = self.executor.clone(); @@ -917,7 +921,7 @@ impl Fs for RealFs { watcher.add(parent).log_err(); } } - + use futures::StreamExt; ( Box::pin(rx.filter_map({ let watcher = watcher.clone(); @@ -2700,6 +2704,7 @@ fn read_recursive<'a>( .with_context(|| format!("path does not exist: {source:?}"))?; if metadata.is_dir { + use futures::StreamExt; output.push((source.to_path_buf(), true)); let mut children = fs.read_dir(source).await?; while let Some(child_path) = children.next().await { From 38618f64c9cbd2fd7583bfb366356ae7db2b9e99 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 13:20:42 +0200 Subject: [PATCH 18/22] Revert "Fix Windows/Linux build" This reverts commit 30f9d855cfcc52ce7d01eef4863acd38bd6966fd. 
--- crates/fs/src/fs.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 21f081e23c89e5..c1c3ef4a5e43cd 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -12,6 +12,7 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use gpui::ReadGlobal as _; +use smol::stream::StreamExt; use std::borrow::Cow; use util::command::new_smol_command; @@ -181,10 +182,7 @@ impl DirEntry { .await; let is_symlink = meta.as_ref().ok().is_some_and(|meta| meta.is_symlink()); let payload = match meta { - Ok(meta) => { - RealFs::std_meta_to_zed_metadata(&self.executor, &self.path, meta, is_symlink) - .await - } + Ok(meta) => RealFs::std_meta_to_zed_metadata(&self.path, meta, is_symlink).await, Err(e) => Err(e.into()), }; self.payload = DirEntryPayload::Metadata(payload); @@ -381,7 +379,6 @@ impl RealFs { } } async fn std_meta_to_zed_metadata( - _executor: &BackgroundExecutor, _path: &Path, metadata: std::fs::Metadata, is_symlink: bool, @@ -390,7 +387,7 @@ impl RealFs { let inode = metadata.ino(); #[cfg(windows)] - let inode = file_id(_executor, _path).await?; + let inode = file_id(_path).await?; #[cfg(windows)] let is_fifo = false; @@ -787,7 +784,7 @@ impl Fs for RealFs { } else { symlink_metadata }; - Self::std_meta_to_zed_metadata(&self.executor, path, metadata, is_symlink) + Self::std_meta_to_zed_metadata(path, metadata, is_symlink) .await .map(Some) } @@ -805,7 +802,6 @@ impl Fs for RealFs { &self, path: &Path, ) -> Result>>>> { - use smol::stream::StreamExt; let path = path.to_owned(); let executor = self.executor.clone(); @@ -921,7 +917,7 @@ impl Fs for RealFs { watcher.add(parent).log_err(); } } - use futures::StreamExt; + ( Box::pin(rx.filter_map({ let watcher = watcher.clone(); @@ -2704,7 +2700,6 @@ fn read_recursive<'a>( .with_context(|| format!("path does not exist: {source:?}"))?; if metadata.is_dir { - use futures::StreamExt; output.push((source.to_path_buf(), true)); let mut children = fs.read_dir(source).await?; while let Some(child_path) = children.next().await { From 72ed106925d6666dd4a9b0a420fed3547ab482b9 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 13:20:47 +0200 Subject: [PATCH 19/22] Revert "Speculative: get rid of smol::unblock from windows' file_id impl" This reverts commit 7710ab671c8f0a2a1a3f8b061cd67086a281797a. --- crates/fs/src/fs.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index c1c3ef4a5e43cd..faacc4317d6489 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -2719,7 +2719,7 @@ fn read_recursive<'a>( // can we get file id not open the file twice? // https://github.com/rust-lang/rust/issues/63010 #[cfg(target_os = "windows")] -async fn file_id(executor: &BackgroundExecutor, path: impl AsRef) -> Result { +async fn file_id(path: impl AsRef) -> Result { use std::os::windows::io::AsRawHandle; use smol::fs::windows::OpenOptionsExt; @@ -2739,13 +2739,12 @@ async fn file_id(executor: &BackgroundExecutor, path: impl AsRef) -> Resul let mut info: BY_HANDLE_FILE_INFORMATION = unsafe { std::mem::zeroed() }; // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfileinformationbyhandle // This function supports Windows XP+ - executor - .spawn(async move { - unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? 
}; + smol::unblock(move || { + unsafe { GetFileInformationByHandle(HANDLE(file.as_raw_handle() as _), &mut info)? }; - Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64)) - }) - .await + Ok(((info.nFileIndexHigh as u64) << 32) | (info.nFileIndexLow as u64)) + }) + .await } #[cfg(target_os = "windows")] From 50aaf893405d25e15f573329f07fcf847a377e0e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 13:21:07 +0200 Subject: [PATCH 20/22] Revert "Experiment: Bundle metadata with directory info" This reverts commit d0fcfe77e655dba9a98a5eaa67932ecbdef7dbb6. --- crates/fs/src/fs.rs | 108 ++++++++--------------------- crates/prompt_store/src/prompts.rs | 1 - crates/theme/src/registry.rs | 2 +- crates/worktree/src/worktree.rs | 20 +++--- 4 files changed, 37 insertions(+), 94 deletions(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index faacc4317d6489..9907d0dcbde489 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -12,7 +12,6 @@ use gpui::App; use gpui::BackgroundExecutor; use gpui::Global; use gpui::ReadGlobal as _; -use smol::stream::StreamExt; use std::borrow::Cow; use util::command::new_smol_command; @@ -26,7 +25,7 @@ use std::os::unix::fs::{FileTypeExt, MetadataExt}; use std::mem::MaybeUninit; use async_tar::Archive; -use futures::{AsyncRead, Stream, future::BoxFuture}; +use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture}; use git::repository::{GitRepository, RealGitRepository}; use rope::Rope; use serde::{Deserialize, Serialize}; @@ -125,7 +124,7 @@ pub trait Fs: Send + Sync { async fn read_dir( &self, path: &Path, - ) -> Result>>>>; + ) -> Result>>>>; async fn watch( &self, @@ -153,49 +152,6 @@ pub trait Fs: Send + Sync { } } -enum DirEntryPayload { - Entry(Option), - Metadata(Result), -} -pub struct DirEntry { - path: Arc, - payload: DirEntryPayload, - executor: BackgroundExecutor, -} - -impl DirEntry { - fn new(entry: std::fs::DirEntry, executor: BackgroundExecutor) -> Self { - let path = entry.path().into(); - - Self { - path, - payload: DirEntryPayload::Entry(Some(entry)), - executor, - } - } - pub async fn metadata(&mut self) -> &Result { - if let DirEntryPayload::Entry(entry) = &mut self.payload { - let dir_entry = entry.take().unwrap(); - let meta = self - .executor - .spawn(async move { dir_entry.metadata() }) - .await; - let is_symlink = meta.as_ref().ok().is_some_and(|meta| meta.is_symlink()); - let payload = match meta { - Ok(meta) => RealFs::std_meta_to_zed_metadata(&self.path, meta, is_symlink).await, - Err(e) => Err(e.into()), - }; - self.payload = DirEntryPayload::Metadata(payload); - } - let DirEntryPayload::Metadata(meta) = &self.payload else { - unreachable!(); - }; - meta - } - pub fn path(&self) -> &Arc { - &self.path - } -} struct GlobalFs(Arc); impl Global for GlobalFs {} @@ -378,32 +334,6 @@ impl RealFs { executor, } } - async fn std_meta_to_zed_metadata( - _path: &Path, - metadata: std::fs::Metadata, - is_symlink: bool, - ) -> Result { - #[cfg(unix)] - let inode = metadata.ino(); - - #[cfg(windows)] - let inode = file_id(_path).await?; - - #[cfg(windows)] - let is_fifo = false; - - #[cfg(unix)] - let is_fifo = metadata.file_type().is_fifo(); - - Ok(Metadata { - inode, - mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), - len: metadata.len(), - is_symlink, - is_dir: metadata.file_type().is_dir(), - is_fifo, - }) - } } #[async_trait::async_trait] @@ -784,9 +714,27 @@ impl Fs for RealFs { } else { symlink_metadata }; - 
Self::std_meta_to_zed_metadata(path, metadata, is_symlink) - .await - .map(Some) + + #[cfg(unix)] + let inode = metadata.ino(); + + #[cfg(windows)] + let inode = file_id(path).await?; + + #[cfg(windows)] + let is_fifo = false; + + #[cfg(unix)] + let is_fifo = metadata.file_type().is_fifo(); + + Ok(Some(Metadata { + inode, + mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), + len: metadata.len(), + is_symlink, + is_dir: metadata.file_type().is_dir(), + is_fifo, + })) } async fn read_link(&self, path: &Path) -> Result { @@ -801,17 +749,15 @@ impl Fs for RealFs { async fn read_dir( &self, path: &Path, - ) -> Result>>>> { + ) -> Result>>>> { let path = path.to_owned(); - - let executor = self.executor.clone(); let result = iter( self.executor .spawn(async move { std::fs::read_dir(path) }) .await?, ) - .map(move |entry| match entry { - Ok(entry) => Ok(DirEntry::new(entry, executor.clone())), + .map(|entry| match entry { + Ok(entry) => Ok(entry.path()), Err(error) => Err(anyhow!("failed to read dir entry {error:?}")), }); Ok(Box::pin(result)) @@ -2704,7 +2650,7 @@ fn read_recursive<'a>( let mut children = fs.read_dir(source).await?; while let Some(child_path) = children.next().await { if let Ok(child_path) = child_path { - read_recursive(fs, &child_path.path(), output).await?; + read_recursive(fs, &child_path, output).await?; } } } else { diff --git a/crates/prompt_store/src/prompts.rs b/crates/prompt_store/src/prompts.rs index f54ea5c4e83667..e6a9144a23a7bb 100644 --- a/crates/prompt_store/src/prompts.rs +++ b/crates/prompt_store/src/prompts.rs @@ -264,7 +264,6 @@ impl PromptBuilder { // Initial scan of the prompt overrides directory if let Ok(mut entries) = params.fs.read_dir(&templates_dir).await { while let Some(Ok(file_path)) = entries.next().await { - let file_path = file_path.path(); if file_path.to_string_lossy().ends_with(".hbs") && let Ok(content) = params.fs.load(&file_path).await { let file_name = file_path.file_stem().unwrap().to_string_lossy(); diff --git a/crates/theme/src/registry.rs b/crates/theme/src/registry.rs index 89818391a562ee..c362b62704257f 100644 --- a/crates/theme/src/registry.rs +++ b/crates/theme/src/registry.rs @@ -220,7 +220,7 @@ impl ThemeRegistry { continue; }; - self.load_user_theme(&theme_path.path(), fs.clone()) + self.load_user_theme(&theme_path, fs.clone()) .await .log_err(); } diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index c64be98a38651c..d3aa911ae27c97 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -3,10 +3,7 @@ mod worktree_settings; #[cfg(test)] mod worktree_tests; -use ::ignore::{ - DirEntry, - gitignore::{Gitignore, GitignoreBuilder}, -}; +use ::ignore::gitignore::{Gitignore, GitignoreBuilder}; use anyhow::{Context as _, Result, anyhow}; use clock::ReplicaId; use collections::{HashMap, HashSet, VecDeque}; @@ -4182,13 +4179,13 @@ impl BackgroundScanner { swap_to_front(&mut child_paths, DOT_GIT); if let Some(path) = child_paths.first() - && path.path().ends_with(DOT_GIT) + && path.ends_with(DOT_GIT) { ignore_stack.repo_root = Some(job.abs_path.clone()); } - for mut child in child_paths { - let child_abs_path: Arc = child.path().clone(); + for child_abs_path in child_paths { + let child_abs_path: Arc = child_abs_path.into(); let child_name = child_abs_path.file_name().unwrap(); let Some(child_path) = child_name .to_str() @@ -4229,8 +4226,9 @@ impl BackgroundScanner { continue; } - let child_metadata = match child.metadata().await { - Ok(metadata) => metadata, 
+ let child_metadata = match self.fs.metadata(&child_abs_path).await { + Ok(Some(metadata)) => metadata, + Ok(None) => continue, Err(err) => { log::error!("error processing {child_abs_path:?}: {err:?}"); continue; @@ -5007,10 +5005,10 @@ fn build_diff( changes.into() } -fn swap_to_front(child_paths: &mut Vec, file: &str) { +fn swap_to_front(child_paths: &mut Vec, file: &str) { let position = child_paths .iter() - .position(|path| path.path().file_name().unwrap() == file); + .position(|path| path.file_name().unwrap() == file); if let Some(position) = position { let temp = child_paths.remove(position); child_paths.insert(0, temp); From ade9dfe58b07b189595c6faed9eb171dcdc8b58d Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 20:50:12 +0200 Subject: [PATCH 21/22] CI fixes --- Cargo.lock | 1 + crates/worktree_benchmarks/Cargo.toml | 1 + crates/worktree_benchmarks/LICENSE-GPL | 1 + 3 files changed, 3 insertions(+) create mode 120000 crates/worktree_benchmarks/LICENSE-GPL diff --git a/Cargo.lock b/Cargo.lock index 7c8c16ce12e997..7f74e688ee2fe8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20905,6 +20905,7 @@ dependencies = [ "fs", "gpui", "settings", + "workspace-hack", "worktree", ] diff --git a/crates/worktree_benchmarks/Cargo.toml b/crates/worktree_benchmarks/Cargo.toml index 29681573adc9da..6fcb66fea856cf 100644 --- a/crates/worktree_benchmarks/Cargo.toml +++ b/crates/worktree_benchmarks/Cargo.toml @@ -9,6 +9,7 @@ fs.workspace = true gpui = { workspace = true, features = ["windows-manifest"] } settings.workspace = true worktree.workspace = true +workspace-hack = { version = "0.1", path = "../../tooling/workspace-hack" } [lints] workspace = true diff --git a/crates/worktree_benchmarks/LICENSE-GPL b/crates/worktree_benchmarks/LICENSE-GPL new file mode 120000 index 00000000000000..89e542f750cd38 --- /dev/null +++ b/crates/worktree_benchmarks/LICENSE-GPL @@ -0,0 +1 @@ +../../LICENSE-GPL \ No newline at end of file From 0403e5c572e0854e699954d99e60c78468dd5360 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 15 Oct 2025 21:06:34 +0200 Subject: [PATCH 22/22] Add missing license --- crates/fs_benchmarks/LICENSE-GPL | 1 + 1 file changed, 1 insertion(+) create mode 120000 crates/fs_benchmarks/LICENSE-GPL diff --git a/crates/fs_benchmarks/LICENSE-GPL b/crates/fs_benchmarks/LICENSE-GPL new file mode 120000 index 00000000000000..89e542f750cd38 --- /dev/null +++ b/crates/fs_benchmarks/LICENSE-GPL @@ -0,0 +1 @@ +../../LICENSE-GPL \ No newline at end of file
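For reference, a minimal consumer-side sketch of the bundled-metadata API that patches 15-20 experimented with and then reverted: in that experiment, `Fs::read_dir` yields `fs::DirEntry` values whose `Metadata` is fetched lazily via `DirEntry::metadata`, replacing the separate `fs.metadata(&child_abs_path)` call per child in the scanner. `count_children` below is a hypothetical helper written only to illustrate that pattern; it is not part of any patch above, and it assumes the experimental `fs::DirEntry` type (with `path()` returning `&Arc<Path>` and `metadata()` returning `&Result<Metadata>`) exactly as defined in patch 15.

use std::path::Path;

use anyhow::Result;
use fs::Fs;
use futures::StreamExt as _;

// Hypothetical helper (not part of the patches): walk one directory level and
// count files vs. directories using the metadata bundled with each DirEntry,
// instead of issuing a second fs.metadata() call per child path.
async fn count_children(fs: &dyn Fs, dir: &Path) -> Result<(usize, usize)> {
    let (mut files, mut dirs) = (0usize, 0usize);
    // With the experimental API, the stream item is Result<fs::DirEntry>.
    let mut entries = fs.read_dir(dir).await?;
    while let Some(entry) = entries.next().await {
        let mut entry = entry?;
        // Clone the Arc<Path> up front so the path stays usable in the error arm.
        let path = entry.path().clone();
        match entry.metadata().await {
            Ok(metadata) if metadata.is_dir => dirs += 1,
            Ok(_) => files += 1,
            // Entries whose metadata cannot be read are skipped, mirroring the
            // error handling in the worktree scanner hunk above.
            Err(err) => log::error!("error processing {path:?}: {err:?}"),
        }
    }
    Ok((files, dirs))
}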