From 4f222d509b0c59258c3c69b1ac7fdc6acb1c421b Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Mon, 9 Feb 2026 17:40:16 -0800 Subject: [PATCH 01/57] MES-710: Add an async icache --- src/fs/icache/async_cache.rs | 840 +++++++++++++++++++++++++++++++++++ src/fs/icache/mod.rs | 6 + 2 files changed, 846 insertions(+) create mode 100644 src/fs/icache/async_cache.rs diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs new file mode 100644 index 0000000..d10d3ac --- /dev/null +++ b/src/fs/icache/async_cache.rs @@ -0,0 +1,840 @@ +//! Async inode cache with InFlight/Available state machine. + +use std::future::Future; +use std::sync::atomic::{AtomicU64, Ordering}; + +use scc::HashMap as ConcurrentHashMap; +use tokio::sync::watch; + +use tracing::{trace, warn}; + +use crate::fs::r#trait::{FileHandle, Inode}; + +use super::IcbLike; + +/// State of an entry in the async inode cache. +pub enum IcbState { + /// Entry is being loaded; waiters clone the receiver and `.changed().await`. + InFlight(watch::Receiver<()>), + /// Entry is ready for use. + Available(I), +} + +/// Trait for resolving an inode to its control block. +/// +/// Implementations act as a "promise" that an ICB will eventually be produced +/// for a given inode. The cache calls `resolve` when it needs to populate a +/// missing entry. +pub trait IcbResolver: Send + Sync { + /// The inode control block type this resolver produces. + type Icb: IcbLike + Send + Sync; + /// Error type returned when resolution fails. + type Error: Send; + + /// Resolve an inode to its control block. + fn resolve(&self, ino: Inode) -> impl Future> + Send; +} + +/// Async, concurrency-safe inode cache. +/// +/// All methods take `&self` — internal synchronization is provided by +/// `scc::HashMap` (sharded lock-free map) and `AtomicU64`. 
+pub struct AsyncICache { + resolver: R, + inode_table: ConcurrentHashMap>, + next_fh: AtomicU64, +} + +impl AsyncICache { + /// Create a new cache with a root ICB at `root_ino` (rc = 1). + pub fn new(resolver: R, root_ino: Inode, root_path: impl Into) -> Self { + let table = ConcurrentHashMap::new(); + // insert_sync is infallible for a fresh map + drop(table.insert_sync( + root_ino, + IcbState::Available(R::Icb::new_root(root_path.into())), + )); + Self { + resolver, + inode_table: table, + next_fh: AtomicU64::new(1), + } + } + + /// Allocate a monotonically increasing file handle. + pub fn allocate_fh(&self) -> FileHandle { + self.next_fh.fetch_add(1, Ordering::Relaxed) + } + + /// Number of entries (`InFlight` + `Available`) in the table. + pub fn inode_count(&self) -> usize { + self.inode_table.len() + } + + /// Wait until `ino` is `Available`. + /// Returns `true` if the entry exists and is Available, + /// `false` if the entry does not exist. + async fn wait_for_available(&self, ino: Inode) -> bool { + let rx = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::InFlight(rx) => Some(rx.clone()), + IcbState::Available(_) => None, + }) + .await; + + match rx { + None => false, // key missing + Some(None) => true, // Available + Some(Some(mut rx)) => { + // Wait for the resolver to complete (or fail/drop sender). + // changed() returns Err(RecvError) when sender is dropped, + // which is fine — it means resolution finished. + let _ = rx.changed().await; + // Re-check: entry is now Available or was removed on error. + self.inode_table + .read_async(&ino, |_, s| matches!(s, IcbState::Available(_))) + .await + .unwrap_or(false) + } + } + } + + /// Check whether `ino` exists. **Awaits** if the entry is `InFlight`. + pub async fn contains(&self, ino: Inode) -> bool { + self.wait_for_available(ino).await + } + + /// Read an ICB via closure. **Awaits** if `InFlight`. + /// Returns `None` if `ino` doesn't exist. 
+ pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&R::Icb) -> T) -> Option { + if !self.wait_for_available(ino).await { + return None; + } + self.inode_table + .read_async(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .await + .flatten() + } + + /// Mutate an ICB via closure. **Awaits** if `InFlight`. + /// Returns `None` if `ino` doesn't exist. + pub async fn get_icb_mut(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { + if !self.wait_for_available(ino).await { + return None; + } + self.inode_table + .update_async(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .await + .flatten() + } + + /// Insert an ICB directly as `Available` (overwrites any existing entry). + pub async fn insert_icb(&self, ino: Inode, icb: R::Icb) { + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + } + + /// Get-or-insert pattern. If `ino` exists (awaits `InFlight`), runs `then` + /// on it. If absent, calls `factory` to create, inserts, then runs `then`. + /// + /// Both `factory` and `then` are `FnOnce` — wrapped in `Option` internally + /// to satisfy the borrow checker across the await-loop. 
+ pub async fn entry_or_insert_icb( + &self, + ino: Inode, + factory: impl FnOnce() -> R::Icb, + then: impl FnOnce(&mut R::Icb) -> T, + ) -> T { + use scc::hash_map::Entry; + let mut factory = Some(factory); + let mut then_fn = Some(then); + + loop { + match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => match occ.get_mut() { + IcbState::Available(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + return t(icb); + } + IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); // release shard lock before awaiting + let _ = rx.changed().await; + } + }, + Entry::Vacant(vac) => { + let f = factory.take().unwrap_or_else(|| unreachable!()); + let mut occ = vac.insert_entry(IcbState::Available(f())); + if let IcbState::Available(icb) = occ.get_mut() { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + return t(icb); + } + unreachable!("just inserted Available"); + } + } + } + } + + /// Look up `ino`. If `Available`, run `then` and return `Ok(T)`. + /// If absent, call the resolver to fetch the ICB, cache it, then run `then`. + /// If another task is already resolving this inode (`InFlight`), wait for it. + /// + /// Returns `Err(R::Error)` if resolution fails. On error the `InFlight` + /// entry is removed so subsequent calls can retry. 
+ pub async fn get_or_resolve( + &self, + ino: Inode, + then: impl FnOnce(&R::Icb) -> T, + ) -> Result { + use scc::hash_map::Entry; + + let mut then_fn = Some(then); + + // Fast path: already Available + { + let hit = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::Available(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + Some(t(icb)) + } + IcbState::InFlight(_) => None, + }) + .await; + if let Some(Some(r)) = hit { + return Ok(r); + } + // InFlight or absent -- fall through + } + + // Try to become the resolver, or wait on existing InFlight + loop { + match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => match occ.get_mut() { + IcbState::Available(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + return Ok(t(icb)); + } + IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); + let _ = rx.changed().await; + // Loop back to re-check + } + }, + Entry::Vacant(vac) => { + // We win the race -- install InFlight and resolve + let (tx, rx) = watch::channel(()); + vac.insert_entry(IcbState::InFlight(rx)); + + match self.resolver.resolve(ino).await { + Ok(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let result = t(&icb); + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + drop(tx); // wake all waiters + return Ok(result); + } + Err(e) => { + // Remove the InFlight entry + self.inode_table.remove_async(&ino).await; + drop(tx); // wake all waiters -- they'll see entry missing + return Err(e); + } + } + } + } + } + } + + /// Increment rc. **Awaits** `InFlight`. Panics if inode is missing. 
+ pub async fn inc_rc(&self, ino: Inode) -> u64 { + self.wait_for_available(ino).await; + self.inode_table + .update_async(&ino, |_, state| match state { + IcbState::Available(icb) => { + *icb.rc_mut() += 1; + icb.rc() + } + IcbState::InFlight(_) => unreachable!("inc_rc after wait_for_available"), + }) + .await + .unwrap_or_else(|| unreachable!("inc_rc: inode {ino} not in table")) + } + + /// Decrement rc by `nlookups`. If rc drops to zero, evicts and returns + /// the ICB. **Awaits** `InFlight` entries. + pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { + if !self.wait_for_available(ino).await { + warn!(ino, "forget on unknown inode"); + return None; + } + + // Atomically remove if rc <= nlookups + let removed = self + .inode_table + .remove_if_async( + &ino, + |state| matches!(state, IcbState::Available(icb) if icb.rc() <= nlookups), + ) + .await; + + if let Some((_, IcbState::Available(icb))) = removed { + trace!(ino, "evicting inode"); + return Some(icb); + } + + // Entry survives — decrement rc + self.inode_table + .update_async(&ino, |_, state| { + if let IcbState::Available(icb) = state { + *icb.rc_mut() -= nlookups; + trace!(ino, new_rc = icb.rc(), "decremented rc"); + } + }) + .await; + + None + } + + /// Iterate over all `Available` entries (skips `InFlight`). 
+ pub fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { + self.inode_table.iter_sync(|ino, state| { + if let IcbState::Available(icb) = state { + f(ino, icb); + } + true // continue iteration + }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::collections::HashMap as StdHashMap; + use std::path::PathBuf; + use std::sync::{Arc, Mutex}; + + #[derive(Debug, Clone, PartialEq)] + struct TestIcb { + rc: u64, + path: PathBuf, + } + + impl IcbLike for TestIcb { + fn new_root(path: PathBuf) -> Self { + Self { rc: 1, path } + } + fn rc(&self) -> u64 { + self.rc + } + fn rc_mut(&mut self) -> &mut u64 { + &mut self.rc + } + } + + struct TestResolver { + responses: Mutex>>, + } + + impl TestResolver { + fn new() -> Self { + Self { + responses: Mutex::new(StdHashMap::new()), + } + } + + fn add(&self, ino: Inode, icb: TestIcb) { + self.responses + .lock() + .expect("test mutex") + .insert(ino, Ok(icb)); + } + + fn add_err(&self, ino: Inode, err: impl Into) { + self.responses + .lock() + .expect("test mutex") + .insert(ino, Err(err.into())); + } + } + + impl IcbResolver for TestResolver { + type Icb = TestIcb; + type Error = String; + + fn resolve( + &self, + ino: Inode, + ) -> impl Future> + Send { + let result = self + .responses + .lock() + .expect("test mutex") + .remove(&ino) + .unwrap_or_else(|| Err(format!("no response for inode {ino}"))); + async move { result } + } + } + + fn test_cache() -> AsyncICache { + AsyncICache::new(TestResolver::new(), 1, "/root") + } + + fn test_cache_with(resolver: TestResolver) -> AsyncICache { + AsyncICache::new(resolver, 1, "/root") + } + + #[tokio::test] + async fn contains_returns_true_for_root() { + let cache = test_cache(); + assert!(cache.contains(1).await, "root should exist"); + } + + #[tokio::test] + async fn contains_returns_false_for_missing() { + let cache = test_cache(); + assert!(!cache.contains(999).await, "missing inode should not exist"); + } + + #[tokio::test] + async fn 
contains_after_resolver_completes() { + let resolver = TestResolver::new(); + resolver.add( + 42, + TestIcb { + rc: 1, + path: "/test".into(), + }, + ); + let cache = Arc::new(test_cache_with(resolver)); + + // Trigger resolve in background + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { cache2.get_or_resolve(42, |_| ()).await }); + + handle + .await + .expect("task panicked") + .expect("resolve failed"); + assert!(cache.contains(42).await, "should be true after resolve"); + } + + #[tokio::test] + async fn new_creates_root_entry() { + let cache = test_cache(); + assert_eq!(cache.inode_count(), 1, "should have exactly 1 entry"); + } + + #[tokio::test] + async fn get_icb_returns_value() { + let cache = test_cache(); + let path = cache.get_icb(1, |icb| icb.path.clone()).await; + assert_eq!(path, Some(PathBuf::from("/root"))); + } + + #[tokio::test] + async fn get_icb_returns_none_for_missing() { + let cache = test_cache(); + let result = cache.get_icb(999, IcbLike::rc).await; + assert_eq!(result, None, "missing inode should return None"); + } + + #[tokio::test] + async fn get_icb_mut_modifies_value() { + let cache = test_cache(); + cache + .get_icb_mut(1, |icb| { + *icb.rc_mut() += 10; + }) + .await; + let rc = cache.get_icb(1, IcbLike::rc).await; + assert_eq!(rc, Some(11), "root starts at rc=1, +10 = 11"); + } + + #[tokio::test] + async fn get_icb_after_resolver_completes() { + let resolver = TestResolver::new(); + resolver.add( + 42, + TestIcb { + rc: 1, + path: "/loaded".into(), + }, + ); + let cache = test_cache_with(resolver); + + // Resolve inode 42 + cache + .get_or_resolve(42, |_| ()) + .await + .expect("resolve failed"); + + let path = cache.get_icb(42, |icb| icb.path.clone()).await; + assert_eq!(path, Some(PathBuf::from("/loaded"))); + } + + #[tokio::test] + async fn insert_icb_adds_entry() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/foo".into(), + }, + ) + .await; + 
assert!(cache.contains(42).await, "inserted entry should exist"); + assert_eq!(cache.inode_count(), 2, "root + inserted = 2"); + } + + #[tokio::test] + async fn entry_or_insert_creates_new() { + let cache = test_cache(); + let rc = cache + .entry_or_insert_icb( + 42, + || TestIcb { + rc: 0, + path: "/new".into(), + }, + |icb| { + *icb.rc_mut() += 1; + icb.rc() + }, + ) + .await; + assert_eq!(rc, 1, "factory creates rc=0, then +1 = 1"); + } + + #[tokio::test] + async fn entry_or_insert_returns_existing() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 5, + path: "/existing".into(), + }, + ) + .await; + + let rc = cache + .entry_or_insert_icb( + 42, + || panic!("factory should not be called"), + |icb| icb.rc(), + ) + .await; + assert_eq!(rc, 5, "existing entry rc should be 5"); + } + + #[tokio::test] + async fn entry_or_insert_after_resolver_completes() { + let resolver = TestResolver::new(); + resolver.add( + 42, + TestIcb { + rc: 1, + path: "/resolved".into(), + }, + ); + let cache = Arc::new(test_cache_with(resolver)); + + // Start resolve in background + let cache2 = Arc::clone(&cache); + let resolve_handle = tokio::spawn(async move { cache2.get_or_resolve(42, |_| ()).await }); + + // Wait for resolve to finish + resolve_handle + .await + .expect("task panicked") + .expect("resolve failed"); + + // Now entry_or_insert should find the existing entry + let rc = cache + .entry_or_insert_icb( + 42, + || panic!("factory should not be called"), + |icb| icb.rc(), + ) + .await; + assert_eq!(rc, 1, "should find the resolved entry"); + } + + #[tokio::test] + async fn inc_rc_increments() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/a".into(), + }, + ) + .await; + let new_rc = cache.inc_rc(42).await; + assert_eq!(new_rc, 2, "rc 1 + 1 = 2"); + } + + #[tokio::test] + async fn forget_decrements_rc() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 5, + path: "/a".into(), + }, + 
) + .await; + + let evicted = cache.forget(42, 2).await; + assert!(evicted.is_none(), "rc 5 - 2 = 3, should not evict"); + + let rc = cache.get_icb(42, IcbLike::rc).await; + assert_eq!(rc, Some(3), "rc should be 3 after forget(2)"); + } + + #[tokio::test] + async fn forget_evicts_when_rc_drops_to_zero() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 3, + path: "/a".into(), + }, + ) + .await; + + let evicted = cache.forget(42, 3).await; + assert!(evicted.is_some(), "rc 3 - 3 = 0, should evict"); + assert!(!cache.contains(42).await, "evicted entry should be gone"); + assert_eq!(cache.inode_count(), 1, "only root remains"); + } + + #[tokio::test] + async fn forget_unknown_inode_returns_none() { + let cache = test_cache(); + let evicted = cache.forget(999, 1).await; + assert!(evicted.is_none(), "unknown inode should return None"); + } + + #[tokio::test] + async fn for_each_iterates_available_entries() { + let cache = test_cache(); + cache + .insert_icb( + 2, + TestIcb { + rc: 1, + path: "/a".into(), + }, + ) + .await; + cache + .insert_icb( + 3, + TestIcb { + rc: 1, + path: "/b".into(), + }, + ) + .await; + + let mut seen = std::collections::HashSet::new(); + cache.for_each(|ino, _icb| { + seen.insert(*ino); + }); + assert_eq!(seen.len(), 3, "should see all 3 entries"); + assert!(seen.contains(&1), "should contain root"); + assert!(seen.contains(&2), "should contain inode 2"); + assert!(seen.contains(&3), "should contain inode 3"); + } + + #[tokio::test] + async fn for_each_skips_inflight() { + let cache = test_cache(); + // Directly insert an InFlight entry for testing iteration + let (_tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + let mut count = 0; + cache.for_each(|_, _| { + count += 1; + }); + assert_eq!(count, 1, "only root, not the InFlight entry"); + } + + #[tokio::test] + async fn allocate_fh_increments() { + let cache = test_cache(); + 
assert_eq!(cache.allocate_fh(), 1, "first fh should be 1"); + assert_eq!(cache.allocate_fh(), 2, "second fh should be 2"); + assert_eq!(cache.allocate_fh(), 3, "third fh should be 3"); + } + + #[tokio::test] + async fn wait_does_not_miss_signal_on_immediate_complete() { + let cache = Arc::new(test_cache()); + + // Insert InFlight manually, then immediately complete before anyone waits + let (tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + // Complete before any waiter + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/fast".into(), + }, + ) + .await; + drop(tx); + + // This must NOT hang + let result = + tokio::time::timeout(std::time::Duration::from_millis(100), cache.contains(42)).await; + assert_eq!( + result, + Ok(true), + "should not hang on already-completed entry" + ); + } + + // -- get_or_resolve tests -- + + #[tokio::test] + async fn get_or_resolve_returns_existing() { + let cache = test_cache(); + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/existing".into(), + }, + ) + .await; + + let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert_eq!(path, Ok(PathBuf::from("/existing"))); + } + + #[tokio::test] + async fn get_or_resolve_resolves_missing() { + let resolver = TestResolver::new(); + resolver.add( + 42, + TestIcb { + rc: 1, + path: "/resolved".into(), + }, + ); + let cache = test_cache_with(resolver); + + let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert_eq!(path, Ok(PathBuf::from("/resolved"))); + // Should now be cached + assert!(cache.contains(42).await); + } + + #[tokio::test] + async fn get_or_resolve_propagates_error() { + let resolver = TestResolver::new(); + resolver.add_err(42, "network error"); + let cache = test_cache_with(resolver); + + let result: Result = + cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert_eq!(result, Err("network error".to_owned())); + // Entry should be cleaned 
up on error + assert!(!cache.contains(42).await); + } + + struct CountingResolver { + count: Arc, + } + + impl IcbResolver for CountingResolver { + type Icb = TestIcb; + type Error = String; + + fn resolve( + &self, + _ino: Inode, + ) -> impl Future> + Send { + self.count.fetch_add(1, Ordering::SeqCst); + async { + tokio::task::yield_now().await; + Ok(TestIcb { + rc: 1, + path: "/coalesced".into(), + }) + } + } + } + + #[tokio::test] + async fn get_or_resolve_coalesces_concurrent_requests() { + use std::sync::atomic::AtomicUsize; + + let resolve_count = Arc::new(AtomicUsize::new(0)); + + let cache = Arc::new(AsyncICache::new( + CountingResolver { + count: Arc::clone(&resolve_count), + }, + 1, + "/root", + )); + + let mut handles = Vec::new(); + for _ in 0..5 { + let c = Arc::clone(&cache); + handles.push(tokio::spawn(async move { + c.get_or_resolve(42, |icb| icb.path.clone()).await + })); + } + + for h in handles { + assert_eq!( + h.await.expect("task panicked"), + Ok(PathBuf::from("/coalesced")), + ); + } + + // Resolver should only have been called ONCE (not 5 times) + assert_eq!( + resolve_count.load(Ordering::SeqCst), + 1, + "should coalesce to 1 resolve call" + ); + } +} diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index 91f8bdd..a9113d4 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -1,9 +1,15 @@ //! Generic directory cache and inode management primitives. 
+#[cfg_attr(not(test), expect(dead_code))] +pub mod async_cache; pub mod bridge; mod cache; mod inode_factory; +#[expect(unused_imports)] +pub use async_cache::AsyncICache; +#[expect(unused_imports)] +pub use async_cache::IcbResolver; pub use cache::ICache; pub use inode_factory::InodeFactory; From 60d09c14d4195229b2d36ac70aadcaf463f24b03 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:07:53 -0800 Subject: [PATCH 02/57] feat(icache): add needs_resolve to IcbLike trait --- src/fs/icache/async_cache.rs | 3 +++ src/fs/icache/mod.rs | 2 ++ src/fs/local.rs | 4 ++++ src/fs/mescloud/icache.rs | 4 ++++ 4 files changed, 13 insertions(+) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index d10d3ac..77f4f2a 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -342,6 +342,9 @@ mod tests { fn rc_mut(&mut self) -> &mut u64 { &mut self.rc } + fn needs_resolve(&self) -> bool { + false + } } struct TestResolver { diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index a9113d4..7c1f7c0 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -19,4 +19,6 @@ pub trait IcbLike { fn new_root(path: std::path::PathBuf) -> Self; fn rc(&self) -> u64; fn rc_mut(&mut self) -> &mut u64; + /// Returns true if this entry needs resolution (e.g., attr not yet fetched). 
+ fn needs_resolve(&self) -> bool; } diff --git a/src/fs/local.rs b/src/fs/local.rs index 73e41de..b568efd 100644 --- a/src/fs/local.rs +++ b/src/fs/local.rs @@ -155,6 +155,10 @@ impl IcbLike for InodeControlBlock { fn rc_mut(&mut self) -> &mut u64 { &mut self.rc } + + fn needs_resolve(&self) -> bool { + false // local FS entries are always fully resolved + } } pub struct LocalFs { diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index d6d9ab9..a4076ba 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -39,6 +39,10 @@ impl IcbLike for InodeControlBlock { fn rc_mut(&mut self) -> &mut u64 { &mut self.rc } + + fn needs_resolve(&self) -> bool { + self.attr.is_none() + } } /// Calculate the number of blocks needed for a given size. From 88d313b55e310a3d234ac962c5d2e498e0f7c2e0 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:14:26 -0800 Subject: [PATCH 03/57] feat(icache): extend IcbResolver with stub+cache params, handle stubs in get_or_resolve --- src/fs/icache/async_cache.rs | 139 ++++++++++++++++++++++++++++++----- src/fs/icache/mod.rs | 3 - 2 files changed, 120 insertions(+), 22 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 77f4f2a..1feb516 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -31,8 +31,19 @@ pub trait IcbResolver: Send + Sync { /// Error type returned when resolution fails. type Error: Send; - /// Resolve an inode to its control block. - fn resolve(&self, ino: Inode) -> impl Future> + Send; + /// Resolve an inode to a fully-populated control block. + /// + /// - `stub`: `Some(icb)` if upgrading an existing stub entry, `None` if creating + /// from scratch. The stub typically has `parent` and `path` set but `attr` missing. + /// - `cache`: reference to the cache, useful for walking parent chains to build paths. 
+ fn resolve( + &self, + ino: Inode, + stub: Option, + cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized; } /// Async, concurrency-safe inode cache. @@ -183,9 +194,11 @@ impl AsyncICache { } } - /// Look up `ino`. If `Available`, run `then` and return `Ok(T)`. - /// If absent, call the resolver to fetch the ICB, cache it, then run `then`. - /// If another task is already resolving this inode (`InFlight`), wait for it. + /// Look up `ino`. If `Available` and fully resolved, run `then` and return + /// `Ok(T)`. If `Available` but `needs_resolve()` is true (stub), extract + /// the stub, resolve it, cache the result, then run `then`. If absent, call + /// the resolver to fetch the ICB, cache it, then run `then`. If another task + /// is already resolving this inode (`InFlight`), wait for it. /// /// Returns `Err(R::Error)` if resolution fails. On error the `InFlight` /// entry is removed so subsequent calls can retry. @@ -198,58 +211,81 @@ impl AsyncICache { let mut then_fn = Some(then); - // Fast path: already Available + // Fast path: Available and fully resolved { let hit = self .inode_table .read_async(&ino, |_, s| match s { - IcbState::Available(icb) => { + IcbState::Available(icb) if !icb.needs_resolve() => { let t = then_fn.take().unwrap_or_else(|| unreachable!()); Some(t(icb)) } - IcbState::InFlight(_) => None, + _ => None, }) .await; if let Some(Some(r)) = hit { return Ok(r); } - // InFlight or absent -- fall through } - // Try to become the resolver, or wait on existing InFlight + // Slow path: missing, InFlight, or stub needing resolution loop { match self.inode_table.entry_async(ino).await { Entry::Occupied(mut occ) => match occ.get_mut() { - IcbState::Available(icb) => { + IcbState::Available(icb) if !icb.needs_resolve() => { let t = then_fn.take().unwrap_or_else(|| unreachable!()); return Ok(t(icb)); } + IcbState::Available(_) => { + // Stub needing resolution — extract stub, replace with InFlight + let (tx, rx) = watch::channel(()); 
+ let old = std::mem::replace(occ.get_mut(), IcbState::InFlight(rx)); + let stub = match old { + IcbState::Available(icb) => icb, + _ => unreachable!(), + }; + drop(occ); // release shard lock before awaiting + + match self.resolver.resolve(ino, Some(stub), self).await { + Ok(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let result = t(&icb); + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + drop(tx); + return Ok(result); + } + Err(e) => { + self.inode_table.remove_async(&ino).await; + drop(tx); + return Err(e); + } + } + } IcbState::InFlight(rx) => { let mut rx = rx.clone(); drop(occ); let _ = rx.changed().await; - // Loop back to re-check } }, Entry::Vacant(vac) => { - // We win the race -- install InFlight and resolve let (tx, rx) = watch::channel(()); vac.insert_entry(IcbState::InFlight(rx)); - match self.resolver.resolve(ino).await { + match self.resolver.resolve(ino, None, self).await { Ok(icb) => { let t = then_fn.take().unwrap_or_else(|| unreachable!()); let result = t(&icb); self.inode_table .upsert_async(ino, IcbState::Available(icb)) .await; - drop(tx); // wake all waiters + drop(tx); return Ok(result); } Err(e) => { - // Remove the InFlight entry self.inode_table.remove_async(&ino).await; - drop(tx); // wake all waiters -- they'll see entry missing + drop(tx); return Err(e); } } @@ -308,6 +344,17 @@ impl AsyncICache { None } + /// Synchronous mutable access to an `Available` entry. + /// Does **not** wait for `InFlight`. Intended for initialization. + pub fn get_icb_mut_sync(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { + self.inode_table + .update_sync(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .flatten() + } + /// Iterate over all `Available` entries (skips `InFlight`). 
pub fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { self.inode_table.iter_sync(|ino, state| { @@ -330,11 +377,16 @@ mod tests { struct TestIcb { rc: u64, path: PathBuf, + resolved: bool, } impl IcbLike for TestIcb { fn new_root(path: PathBuf) -> Self { - Self { rc: 1, path } + Self { + rc: 1, + path, + resolved: true, + } } fn rc(&self) -> u64 { self.rc @@ -343,7 +395,7 @@ mod tests { &mut self.rc } fn needs_resolve(&self) -> bool { - false + !self.resolved } } @@ -380,6 +432,8 @@ mod tests { fn resolve( &self, ino: Inode, + _stub: Option, + _cache: &AsyncICache, ) -> impl Future> + Send { let result = self .responses @@ -419,6 +473,7 @@ mod tests { TestIcb { rc: 1, path: "/test".into(), + resolved: true, }, ); let cache = Arc::new(test_cache_with(resolver)); @@ -474,6 +529,7 @@ mod tests { TestIcb { rc: 1, path: "/loaded".into(), + resolved: true, }, ); let cache = test_cache_with(resolver); @@ -497,6 +553,7 @@ mod tests { TestIcb { rc: 1, path: "/foo".into(), + resolved: true, }, ) .await; @@ -513,6 +570,7 @@ mod tests { || TestIcb { rc: 0, path: "/new".into(), + resolved: true, }, |icb| { *icb.rc_mut() += 1; @@ -532,6 +590,7 @@ mod tests { TestIcb { rc: 5, path: "/existing".into(), + resolved: true, }, ) .await; @@ -554,6 +613,7 @@ mod tests { TestIcb { rc: 1, path: "/resolved".into(), + resolved: true, }, ); let cache = Arc::new(test_cache_with(resolver)); @@ -588,6 +648,7 @@ mod tests { TestIcb { rc: 1, path: "/a".into(), + resolved: true, }, ) .await; @@ -604,6 +665,7 @@ mod tests { TestIcb { rc: 5, path: "/a".into(), + resolved: true, }, ) .await; @@ -624,6 +686,7 @@ mod tests { TestIcb { rc: 3, path: "/a".into(), + resolved: true, }, ) .await; @@ -650,6 +713,7 @@ mod tests { TestIcb { rc: 1, path: "/a".into(), + resolved: true, }, ) .await; @@ -659,6 +723,7 @@ mod tests { TestIcb { rc: 1, path: "/b".into(), + resolved: true, }, ) .await; @@ -716,6 +781,7 @@ mod tests { TestIcb { rc: 1, path: "/fast".into(), + resolved: true, }, ) .await; @@ -742,6 
+808,7 @@ mod tests { TestIcb { rc: 1, path: "/existing".into(), + resolved: true, }, ) .await; @@ -758,6 +825,7 @@ mod tests { TestIcb { rc: 1, path: "/resolved".into(), + resolved: true, }, ); let cache = test_cache_with(resolver); @@ -792,6 +860,8 @@ mod tests { fn resolve( &self, _ino: Inode, + _stub: Option, + _cache: &AsyncICache, ) -> impl Future> + Send { self.count.fetch_add(1, Ordering::SeqCst); async { @@ -799,6 +869,7 @@ mod tests { Ok(TestIcb { rc: 1, path: "/coalesced".into(), + resolved: true, }) } } @@ -840,4 +911,34 @@ mod tests { "should coalesce to 1 resolve call" ); } + + #[tokio::test] + async fn get_or_resolve_resolves_stub_entry() { + let resolver = TestResolver::new(); + resolver.add( + 42, + TestIcb { + rc: 1, + path: "/resolved".into(), + resolved: true, + }, + ); + let cache = test_cache_with(resolver); + + // Insert unresolved stub + cache + .insert_icb( + 42, + TestIcb { + rc: 0, + path: "/stub".into(), + resolved: false, + }, + ) + .await; + + // get_or_resolve should trigger resolution because needs_resolve() == true + let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert_eq!(path, Ok(PathBuf::from("/resolved"))); + } } diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index 7c1f7c0..c596f85 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -1,14 +1,11 @@ //! Generic directory cache and inode management primitives. 
-#[cfg_attr(not(test), expect(dead_code))] pub mod async_cache; pub mod bridge; mod cache; mod inode_factory; -#[expect(unused_imports)] pub use async_cache::AsyncICache; -#[expect(unused_imports)] pub use async_cache::IcbResolver; pub use cache::ICache; pub use inode_factory::InodeFactory; From c3af0b73f73caafaab79bbbf7a4464ad54e450ee Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:16:56 -0800 Subject: [PATCH 04/57] feat(icache): make InodeFactory atomic --- src/fs/icache/inode_factory.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/fs/icache/inode_factory.rs b/src/fs/icache/inode_factory.rs index 3f8f95e..9dcbe65 100644 --- a/src/fs/icache/inode_factory.rs +++ b/src/fs/icache/inode_factory.rs @@ -1,18 +1,19 @@ +use std::sync::atomic::{AtomicU64, Ordering}; use crate::fs::r#trait::Inode; /// Monotonically increasing inode allocator. pub struct InodeFactory { - next_inode: Inode, + next_inode: AtomicU64, } impl InodeFactory { pub fn new(start: Inode) -> Self { - Self { next_inode: start } + Self { + next_inode: AtomicU64::new(start), + } } - pub fn allocate(&mut self) -> Inode { - let ino = self.next_inode; - self.next_inode += 1; - ino + pub fn allocate(&self) -> Inode { + self.next_inode.fetch_add(1, Ordering::Relaxed) } } From ee8507a08e4dc726908f60e22778d705d5caf5b6 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:20:06 -0800 Subject: [PATCH 05/57] feat(mescloud): rewrite MescloudICache as generic over IcbResolver --- src/fs/mescloud/icache.rs | 262 +++++++++++++++++++++----------------- 1 file changed, 145 insertions(+), 117 deletions(-) diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index a4076ba..5c58c8f 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -3,20 +3,16 @@ use std::ffi::OsStr; use std::time::SystemTime; -use tracing::warn; - -use crate::fs::icache::{ICache, IcbLike, InodeFactory}; +use crate::fs::icache::{AsyncICache, 
IcbLike, IcbResolver, InodeFactory}; use crate::fs::r#trait::{ - CommonFileAttr, DirEntry, DirEntryType, FileAttr, FilesystemStats, Inode, Permissions, + CommonFileAttr, FileAttr, FileHandle, FilesystemStats, Inode, Permissions, }; -/// Inode control block for mescloud filesystem layers (`MesaFS`, `OrgFs`, `RepoFs`). +/// Inode control block for mescloud filesystem layers. pub struct InodeControlBlock { - /// The root inode doesn't have a parent. pub parent: Option, pub rc: u64, pub path: std::path::PathBuf, - pub children: Option>, /// Cached file attributes from the last lookup. pub attr: Option, } @@ -27,7 +23,6 @@ impl IcbLike for InodeControlBlock { rc: 1, parent: None, path, - children: None, attr: None, } } @@ -50,151 +45,147 @@ pub fn blocks_of_size(block_size: u32, size: u64) -> u64 { size.div_ceil(u64::from(block_size)) } -/// Mescloud-specific directory cache. -/// -/// Wraps [`ICache`] and adds inode allocation, attribute -/// caching, `ensure_child_inode`, and filesystem metadata. -pub struct MescloudICache { - inner: ICache, - inode_factory: InodeFactory, +/// Free function -- usable by both MescloudICache and resolvers. +pub fn make_common_file_attr( + ino: Inode, + perm: u16, + atime: SystemTime, + mtime: SystemTime, fs_owner: (u32, u32), block_size: u32, -} - -impl std::ops::Deref for MescloudICache { - type Target = ICache; - fn deref(&self) -> &Self::Target { - &self.inner +) -> CommonFileAttr { + CommonFileAttr { + ino, + atime, + mtime, + ctime: SystemTime::UNIX_EPOCH, + crtime: SystemTime::UNIX_EPOCH, + perm: Permissions::from_bits_truncate(perm), + nlink: 1, + uid: fs_owner.0, + gid: fs_owner.1, + blksize: block_size, } } -impl std::ops::DerefMut for MescloudICache { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } +/// Mescloud-specific directory cache wrapper over AsyncICache. 
+pub struct MescloudICache> { + inner: AsyncICache, + inode_factory: InodeFactory, + fs_owner: (u32, u32), + block_size: u32, } -impl MescloudICache { +impl> MescloudICache { /// Create a new `MescloudICache`. Initializes root ICB (rc=1), caches root dir attr. - pub fn new(root_ino: Inode, fs_owner: (u32, u32), block_size: u32) -> Self { - let mut icache = Self { - inner: ICache::new(root_ino, "/"), + pub fn new(resolver: R, root_ino: Inode, fs_owner: (u32, u32), block_size: u32) -> Self { + let cache = Self { + inner: AsyncICache::new(resolver, root_ino, "/"), inode_factory: InodeFactory::new(root_ino + 1), fs_owner, block_size, }; + // Set root directory attr synchronously during initialization let now = SystemTime::now(); let root_attr = FileAttr::Directory { - common: icache.make_common_file_attr(root_ino, 0o755, now, now), + common: make_common_file_attr(root_ino, 0o755, now, now, fs_owner, block_size), }; - icache.cache_attr(root_ino, root_attr); - icache + cache.inner.get_icb_mut_sync(root_ino, |icb| { + icb.attr = Some(root_attr); + }); + + cache } - /// Allocate a new inode number. - pub fn allocate_inode(&mut self) -> Inode { - self.inode_factory.allocate() + // -- Delegated from AsyncICache (async) -- + + pub async fn contains(&self, ino: Inode) -> bool { + self.inner.contains(ino).await } - pub fn get_attr(&self, ino: Inode) -> Option { - self.inner.get_icb(ino).and_then(|icb| icb.attr) + pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&InodeControlBlock) -> T) -> Option { + self.inner.get_icb(ino, f).await } - pub fn cache_attr(&mut self, ino: Inode, attr: FileAttr) { - if let Some(icb) = self.inner.get_icb_mut(ino) { - icb.attr = Some(attr); - } + pub async fn get_icb_mut( + &self, + ino: Inode, + f: impl FnOnce(&mut InodeControlBlock) -> T, + ) -> Option { + self.inner.get_icb_mut(ino, f).await } - /// Ensure a child inode exists under `parent` with the given `name` and `kind`. - /// Reuses existing inode if present. Does NOT bump rc. 
- pub fn ensure_child_inode( - &mut self, - parent: Inode, - name: &OsStr, - kind: DirEntryType, - ) -> (Inode, FileAttr) { - // Check existing child by parent + name. - let existing = self - .inner - .iter() - .find(|&(&_ino, icb)| icb.parent == Some(parent) && icb.path.as_os_str() == name) - .map(|(&ino, _)| ino); - - if let Some(existing_ino) = existing { - if let Some(attr) = self.inner.get_icb(existing_ino).and_then(|icb| icb.attr) { - return (existing_ino, attr); - } + pub async fn insert_icb(&self, ino: Inode, icb: InodeControlBlock) { + self.inner.insert_icb(ino, icb).await + } - warn!(ino = existing_ino, parent, name = ?name, ?kind, - "ensure_child_inode: attr missing on existing inode, rebuilding"); - let attr = self.make_attr_for_kind(existing_ino, kind); - self.cache_attr(existing_ino, attr); - return (existing_ino, attr); - } + pub async fn entry_or_insert_icb( + &self, + ino: Inode, + factory: impl FnOnce() -> InodeControlBlock, + then: impl FnOnce(&mut InodeControlBlock) -> T, + ) -> T { + self.inner.entry_or_insert_icb(ino, factory, then).await + } - let ino = self.inode_factory.allocate(); - self.inner.insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent), - children: None, - attr: None, - }, - ); - - let attr = self.make_attr_for_kind(ino, kind); - self.cache_attr(ino, attr); - (ino, attr) - } - - pub fn make_common_file_attr( + pub async fn inc_rc(&self, ino: Inode) -> u64 { + self.inner.inc_rc(ino).await + } + + pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { + self.inner.forget(ino, nlookups).await + } + + pub async fn get_or_resolve( &self, ino: Inode, - perm: u16, - atime: SystemTime, - mtime: SystemTime, - ) -> CommonFileAttr { - CommonFileAttr { - ino, - atime, - mtime, - ctime: SystemTime::UNIX_EPOCH, - crtime: SystemTime::UNIX_EPOCH, - perm: Permissions::from_bits_truncate(perm), - nlink: 1, - uid: self.fs_owner.0, - gid: self.fs_owner.1, - blksize: self.block_size, - } + then: 
impl FnOnce(&InodeControlBlock) -> T, + ) -> Result { + self.inner.get_or_resolve(ino, then).await } - fn make_attr_for_kind(&self, ino: Inode, kind: DirEntryType) -> FileAttr { - let now = SystemTime::now(); - match kind { - DirEntryType::Directory => FileAttr::Directory { - common: self.make_common_file_attr(ino, 0o755, now, now), - }, - DirEntryType::RegularFile - | DirEntryType::Symlink - | DirEntryType::CharDevice - | DirEntryType::BlockDevice - | DirEntryType::NamedPipe - | DirEntryType::Socket => FileAttr::RegularFile { - common: self.make_common_file_attr(ino, 0o644, now, now), - size: 0, - blocks: 0, - }, - } + // -- Delegated (sync) -- + + pub fn allocate_fh(&self) -> FileHandle { + self.inner.allocate_fh() + } + + pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { + self.inner.for_each(f) + } + + pub fn inode_count(&self) -> usize { + self.inner.inode_count() + } + + // -- Domain-specific -- + + /// Allocate a new inode number. + pub fn allocate_inode(&self) -> Inode { + self.inode_factory.allocate() + } + + pub async fn get_attr(&self, ino: Inode) -> Option { + self.inner.get_icb(ino, |icb| icb.attr).await.flatten() + } + + pub async fn cache_attr(&self, ino: Inode, attr: FileAttr) { + self.inner + .get_icb_mut(ino, |icb| { + icb.attr = Some(attr); + }) + .await; } pub fn fs_owner(&self) -> (u32, u32) { self.fs_owner } + pub fn block_size(&self) -> u32 { + self.block_size + } + pub fn statfs(&self) -> FilesystemStats { FilesystemStats { block_size: self.block_size, @@ -210,4 +201,41 @@ impl MescloudICache { max_filename_length: 255, } } + + /// Find an existing child by (parent, name) or allocate a new inode. + /// If new, inserts a stub ICB (parent+path set, attr=None, rc=0). + /// Does NOT bump rc. Returns the inode number. 
+ pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { + // Search for existing child by parent + name + let mut existing_ino = None; + self.inner.for_each(|&ino, icb| { + if icb.parent == Some(parent) && icb.path.as_os_str() == name { + existing_ino = Some(ino); + } + }); + + if let Some(ino) = existing_ino { + return ino; + } + + // Allocate new inode and insert stub + let ino = self.inode_factory.allocate(); + self.inner + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent), + attr: None, + }, + ) + .await; + ino + } + + /// Direct access to the inner async cache for resolvers that need it. + pub fn inner(&self) -> &AsyncICache { + &self.inner + } } From eed06ad5203825cabe9ac1a5c75f6cefd8782e11 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:26:31 -0800 Subject: [PATCH 06/57] feat(mescloud): implement RepoResolver, update RepoFs to use async icache --- src/fs/mescloud/repo.rs | 287 +++++++++++++++++++++++++++------------- 1 file changed, 198 insertions(+), 89 deletions(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 94f7ee8..ba94611 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -2,6 +2,7 @@ //! //! This module directly accesses the mesa repo through the Rust SDK, on a per-repo basis. 
+use std::future::Future; use std::{collections::HashMap, ffi::OsStr, path::PathBuf, time::SystemTime}; use base64::Engine as _; @@ -11,6 +12,7 @@ use mesa_dev::low_level::content::{Content, DirEntry as MesaDirEntry}; use num_traits::cast::ToPrimitive as _; use tracing::{instrument, trace, warn}; +use crate::fs::icache::{AsyncICache, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FileOpenOptions, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -21,7 +23,132 @@ pub use super::common::{ GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, }; use super::icache as mescloud_icache; -use super::icache::MescloudICache; +use super::icache::{InodeControlBlock, MescloudICache}; + +// --------------------------------------------------------------------------- +// RepoResolver +// --------------------------------------------------------------------------- + +pub(super) struct RepoResolver { + client: MesaClient, + org_name: String, + repo_name: String, + ref_: String, + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for RepoResolver { + type Icb = InodeControlBlock; + type Error = LookupError; + + fn resolve( + &self, + ino: Inode, + stub: Option, + cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized, + { + let client = self.client.clone(); + let org_name = self.org_name.clone(); + let repo_name = self.repo_name.clone(); + let ref_ = self.ref_.clone(); + let fs_owner = self.fs_owner; + let block_size = self.block_size; + + async move { + let stub = stub.expect("RepoResolver requires a stub ICB"); + let file_path = + build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; + + let content = client + .org(&org_name) + .repos() + .at(&repo_name) + .content() + .get(Some(ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(MesaApiError::from)?; + + let now = SystemTime::now(); + #[expect( + clippy::match_same_arms, + reason = "symlink arm will 
diverge once readlink is wired up" + )] + let attr = match &content { + Content::File(f) => { + let size = f.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + ino, 0o644, now, now, fs_owner, block_size, + ), + size, + blocks: mescloud_icache::blocks_of_size(block_size, size), + } + } + Content::Symlink(s) => { + let size = s.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + ino, 0o644, now, now, fs_owner, block_size, + ), + size, + blocks: mescloud_icache::blocks_of_size(block_size, size), + } + } + Content::Dir(_) => FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + ino, 0o755, now, now, fs_owner, block_size, + ), + }, + }; + + Ok(InodeControlBlock { + parent: stub.parent, + path: stub.path, + rc: stub.rc, + attr: Some(attr), + }) + } + } +} + +// --------------------------------------------------------------------------- +// build_repo_path helper +// --------------------------------------------------------------------------- + +/// Walk the parent chain in the cache to build the repo-relative path. +/// Returns `None` for the root inode (maps to `path=None` in the mesa content API). 
+async fn build_repo_path( + parent: Option, + name: &std::path::Path, + cache: &AsyncICache, + root_ino: Inode, +) -> Option { + let parent = parent?; + if parent == root_ino { + return name.to_str().map(String::from); + } + + let mut components = vec![name.to_path_buf()]; + let mut current = parent; + while current != root_ino { + let (path, next_parent) = cache + .get_icb(current, |icb| (icb.path.clone(), icb.parent)) + .await?; + components.push(path); + current = next_parent?; + } + components.reverse(); + let joined: PathBuf = components.iter().collect(); + joined.to_str().map(String::from) +} + +// --------------------------------------------------------------------------- +// RepoFs +// --------------------------------------------------------------------------- /// A filesystem rooted at a single mesa repository. /// @@ -33,7 +160,8 @@ pub struct RepoFs { repo_name: String, ref_: String, - icache: MescloudICache, + icache: MescloudICache, + readdir_buf: Vec, open_files: HashMap, } @@ -49,12 +177,21 @@ impl RepoFs { ref_: String, fs_owner: (u32, u32), ) -> Self { + let resolver = RepoResolver { + client: client.clone(), + org_name: org_name.clone(), + repo_name: repo_name.clone(), + ref_: ref_.clone(), + fs_owner, + block_size: Self::BLOCK_SIZE, + }; Self { client, org_name, repo_name, ref_, - icache: MescloudICache::new(Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + readdir_buf: Vec::new(), open_files: HashMap::new(), } } @@ -65,15 +202,15 @@ impl RepoFs { } /// Get the cached attr for an inode, if present. - pub(crate) fn inode_table_get_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino) + pub(crate) async fn inode_table_get_attr(&self, ino: Inode) -> Option { + self.icache.get_attr(ino).await } /// Build the repo-relative path for an inode by walking up the parent chain. 
/// /// Returns `None` for the root inode (the repo top-level maps to `path=None` in the /// mesa content API). - fn path_of_inode(&self, ino: Inode) -> Option { + async fn path_of_inode(&self, ino: Inode) -> Option { if ino == Self::ROOT_INO { return None; } @@ -81,26 +218,17 @@ impl RepoFs { let mut components = Vec::new(); let mut current = ino; while current != Self::ROOT_INO { - let icb = self.icache.get_icb(current)?; - components.push(icb.path.clone()); - current = icb.parent?; + let (path, parent) = self + .icache + .get_icb(current, |icb| (icb.path.clone(), icb.parent)) + .await?; + components.push(path); + current = parent?; } components.reverse(); let joined: PathBuf = components.iter().collect(); joined.to_str().map(String::from) } - - /// Build the repo-relative path for a child of `parent`. - fn path_of_child(&self, parent: Inode, name: &OsStr) -> Option { - if parent == Self::ROOT_INO { - return name.to_str().map(String::from); - } - self.path_of_inode(parent).and_then(|p| { - let mut pb = PathBuf::from(p); - pb.push(name); - pb.to_str().map(String::from) - }) - } } #[async_trait::async_trait] @@ -115,62 +243,19 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { debug_assert!( - self.icache.contains(parent), + self.icache.contains(parent).await, "lookup: parent inode {parent} not in inode table" ); - let file_path = self.path_of_child(parent, name); - - let content = self - .client - .org(&self.org_name) - .repos() - .at(&self.repo_name) - .content() - .get(Some(self.ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(MesaApiError::from)?; - - #[expect( - clippy::match_same_arms, - reason = "symlink arm will diverge once readlink is wired up" - )] - let kind = match &content { - Content::File(_) => DirEntryType::RegularFile, - // TODO(MES-712): return DirEntryType::Symlink and FileAttr::Symlink, then wire up readlink. 
- Content::Symlink(_) => DirEntryType::RegularFile, - Content::Dir(_) => DirEntryType::Directory, - }; - - let (ino, _) = self.icache.ensure_child_inode(parent, name, kind); - - let now = SystemTime::now(); - let attr = match &content { - Content::File(f) => { - let size = f.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: self.icache.make_common_file_attr(ino, 0o644, now, now), - size, - blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), - } - } - // TODO(MES-712): return FileAttr::Symlink { target, size } and wire up readlink. - Content::Symlink(s) => { - let size = s.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: self.icache.make_common_file_attr(ino, 0o644, now, now), - size, - blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), - } - } - Content::Dir(_) => FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), - }, - }; - self.icache.cache_attr(ino, attr); + let ino = self.icache.ensure_child_ino(parent, name).await; + let attr = self + .icache + .get_or_resolve(ino, |icb| icb.attr.expect("resolver should populate attr")) + .await?; + self.icache.cache_attr(ino, attr).await; - let rc = self.icache.inc_rc(ino); - trace!(ino, path = ?file_path, rc, "resolved inode"); + let rc = self.icache.inc_rc(ino).await; + trace!(ino, ?name, rc, "resolved inode"); Ok(attr) } @@ -180,7 +265,7 @@ impl Fs for RepoFs { ino: Inode, _fh: Option, ) -> Result { - self.icache.get_attr(ino).ok_or_else(|| { + self.icache.get_attr(ino).await.ok_or_else(|| { warn!(ino, "getattr on unknown inode"); GetAttrError::InodeNotFound }) @@ -189,18 +274,18 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { debug_assert!( - self.icache.contains(ino), + self.icache.contains(ino).await, "readdir: inode {ino} not in inode table" ); debug_assert!( matches!( - self.icache.get_attr(ino), + 
self.icache.get_attr(ino).await, Some(FileAttr::Directory { .. }) | None ), "readdir: inode {ino} has non-directory cached attr" ); - let file_path = self.path_of_inode(ino); + let file_path = self.path_of_inode(ino).await; let content = self .client @@ -234,7 +319,34 @@ impl Fs for RepoFs { let mut entries = Vec::with_capacity(collected.len()); for (name, kind) in &collected { - let (child_ino, _) = self.icache.ensure_child_inode(ino, OsStr::new(name), *kind); + let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; + // Insert fully-populated attr (bypassing resolver) since we know the kind from the listing + let now = SystemTime::now(); + let attr = match kind { + DirEntryType::Directory => FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + child_ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), + }, + _ => FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + child_ino, + 0o644, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), + size: 0, + blocks: 0, + }, + }; + self.icache.cache_attr(child_ino, attr).await; entries.push(DirEntry { ino: child_ino, name: name.clone().into(), @@ -242,22 +354,19 @@ impl Fs for RepoFs { }); } - let icb = self - .icache - .get_icb_mut(ino) - .ok_or(ReadDirError::InodeNotFound)?; - Ok(icb.children.insert(entries)) + self.readdir_buf = entries; + Ok(&self.readdir_buf) } #[instrument(skip(self), fields(repo = %self.repo_name))] async fn open(&mut self, ino: Inode, _flags: OpenFlags) -> Result { - if !self.icache.contains(ino) { + if !self.icache.contains(ino).await { warn!(ino, "open on unknown inode"); return Err(OpenError::InodeNotFound); } debug_assert!( matches!( - self.icache.get_attr(ino), + self.icache.get_attr(ino).await, Some(FileAttr::RegularFile { .. 
}) | None ), "open: inode {ino} has non-file cached attr" @@ -291,13 +400,13 @@ impl Fs for RepoFs { ); debug_assert!( matches!( - self.icache.get_attr(ino), + self.icache.get_attr(ino).await, Some(FileAttr::RegularFile { .. }) | None ), "read: inode {ino} has non-file cached attr" ); - let file_path = self.path_of_inode(ino); + let file_path = self.path_of_inode(ino).await; let content = self .client @@ -350,11 +459,11 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { debug_assert!( - self.icache.contains(ino), + self.icache.contains(ino).await, "forget: inode {ino} not in inode table" ); - self.icache.forget(ino, nlookups); + self.icache.forget(ino, nlookups).await; } async fn statfs(&mut self) -> Result { From 8de5693c53080194177fcf34011dfc01cb6c5b3c Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:31:36 -0800 Subject: [PATCH 07/57] feat(mescloud): implement OrgResolver, update OrgFs to use async icache --- src/fs/mescloud/org.rs | 351 +++++++++++++++++++++++++---------------- 1 file changed, 213 insertions(+), 138 deletions(-) diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index fcfd522..a404393 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::ffi::OsStr; +use std::future::Future; use std::time::SystemTime; use bytes::Bytes; @@ -12,14 +13,65 @@ pub use super::common::{ GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, }; use super::common::{InodeControlBlock, MesaApiError}; +use super::icache as mescloud_icache; use super::icache::MescloudICache; use super::repo::RepoFs; use crate::fs::icache::bridge::HashMapBridge; +use crate::fs::icache::{AsyncICache, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, }; +// 
--------------------------------------------------------------------------- +// OrgResolver +// --------------------------------------------------------------------------- + +pub(super) struct OrgResolver { + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for OrgResolver { + type Icb = InodeControlBlock; + type Error = LookupError; + + fn resolve( + &self, + ino: Inode, + stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized, + { + let fs_owner = self.fs_owner; + let block_size = self.block_size; + async move { + let stub = stub.unwrap_or_else(|| InodeControlBlock { + parent: None, + path: "/".into(), + rc: 0, + attr: None, + }); + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + ino, 0o755, now, now, fs_owner, block_size, + ), + }; + Ok(InodeControlBlock { + attr: Some(attr), + ..stub + }) + } + } +} + +// --------------------------------------------------------------------------- +// OrgFs +// --------------------------------------------------------------------------- + #[derive(Debug, Clone)] pub struct OrgConfig { pub name: String, @@ -50,7 +102,8 @@ pub struct OrgFs { name: String, client: MesaClient, - icache: MescloudICache, + icache: MescloudICache, + readdir_buf: Vec, /// Maps org-level repo-root inodes → index into `repos`. repo_inodes: HashMap, @@ -96,18 +149,25 @@ impl OrgFs { /// Ensure an inode exists for a virtual owner directory (github only). Does NOT bump rc. /// TODO(MES-674): Cleanup "special" casing for github. 
- fn ensure_owner_inode(&mut self, owner: &str) -> (Inode, FileAttr) { + async fn ensure_owner_inode(&mut self, owner: &str) -> (Inode, FileAttr) { // Check existing for (&ino, existing_owner) in &self.owner_inodes { if existing_owner == owner { - if let Some(attr) = self.icache.get_attr(ino) { + if let Some(attr) = self.icache.get_attr(ino).await { return (ino, attr); } let now = SystemTime::now(); let attr = FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - self.icache.cache_attr(ino, attr); + self.icache.cache_attr(ino, attr).await; return (ino, attr); } } @@ -115,34 +175,47 @@ impl OrgFs { // Allocate new let ino = self.icache.allocate_inode(); let now = SystemTime::now(); - self.icache.insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: owner.into(), - parent: Some(Self::ROOT_INO), - children: None, - attr: None, - }, - ); + self.icache + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: owner.into(), + parent: Some(Self::ROOT_INO), + attr: None, + }, + ) + .await; self.owner_inodes.insert(ino, owner.to_owned()); let attr = FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - self.icache.cache_attr(ino, attr); + self.icache.cache_attr(ino, attr).await; (ino, attr) } /// Get the cached attr for an inode, if present. 
- pub(crate) fn inode_table_get_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino) + pub(crate) async fn inode_table_get_attr(&self, ino: Inode) -> Option { + self.icache.get_attr(ino).await } pub fn new(name: String, client: MesaClient, fs_owner: (u32, u32)) -> Self { + let resolver = OrgResolver { + fs_owner, + block_size: Self::BLOCK_SIZE, + }; Self { name, client, - icache: MescloudICache::new(Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + readdir_buf: Vec::new(), repo_inodes: HashMap::new(), owner_inodes: HashMap::new(), repos: Vec::new(), @@ -150,7 +223,7 @@ impl OrgFs { } /// Classify an inode by its role. - fn inode_role(&self, ino: Inode) -> InodeRole { + async fn inode_role(&self, ino: Inode) -> InodeRole { if ino == Self::ROOT_INO { return InodeRole::OrgRoot; } @@ -161,7 +234,7 @@ impl OrgFs { return InodeRole::RepoOwned { idx }; } // Walk parent chain to find owning repo. - if let Some(idx) = self.repo_slot_for_inode(ino) { + if let Some(idx) = self.repo_slot_for_inode(ino).await { return InodeRole::RepoOwned { idx }; } // Shouldn't happen — all non-root inodes should be repo-owned. @@ -174,24 +247,24 @@ impl OrgFs { } /// Find the repo slot index that owns `ino` by walking the parent chain. - fn repo_slot_for_inode(&self, ino: Inode) -> Option { + async fn repo_slot_for_inode(&self, ino: Inode) -> Option { // Direct repo root? if let Some(&idx) = self.repo_inodes.get(&ino) { return Some(idx); } // Walk parents. 
let mut current = ino; - while let Some(parent) = self.icache.get_icb(current).and_then(|icb| icb.parent) { + loop { + let parent = self + .icache + .get_icb(current, |icb| icb.parent) + .await + .flatten()?; if let Some(&idx) = self.repo_inodes.get(&parent) { return Some(idx); } current = parent; } - trace!( - ino, - "repo_slot_for_inode: exhausted parent chain without finding repo" - ); - None } /// Ensure an inode + `RepoFs` exists for the given repo name. @@ -200,7 +273,7 @@ impl OrgFs { /// - `repo_name`: name used for API calls / `RepoFs` (base64-encoded for github) /// - `display_name`: name shown in filesystem ("linux" for github, same as `repo_name` otherwise) /// - `parent_ino`: owner-dir inode for github, `ROOT_INO` otherwise - fn ensure_repo_inode( + async fn ensure_repo_inode( &mut self, repo_name: &str, display_name: &str, @@ -210,13 +283,12 @@ impl OrgFs { // Check existing repos. for (&ino, &idx) in &self.repo_inodes { if self.repos[idx].repo.repo_name() == repo_name { - if let Some(icb) = self.icache.get_icb(ino) - && let Some(attr) = icb.attr - { + if let Some(attr) = self.icache.get_attr(ino).await { + let rc = self.icache.get_icb(ino, |icb| icb.rc).await.unwrap_or(0); trace!( ino, repo = repo_name, - rc = icb.rc, + rc, "ensure_repo_inode: reusing" ); return (ino, attr); @@ -229,9 +301,16 @@ impl OrgFs { ); let now = SystemTime::now(); let attr = FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - self.icache.cache_attr(ino, attr); + self.icache.cache_attr(ino, attr).await; return (ino, attr); } } @@ -245,16 +324,17 @@ impl OrgFs { ); let now = SystemTime::now(); - self.icache.insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: display_name.into(), - parent: Some(parent_ino), - children: None, - attr: None, - }, - ); + self.icache + .insert_icb( + ino, + 
InodeControlBlock { + rc: 0, + path: display_name.into(), + parent: Some(parent_ino), + attr: None, + }, + ) + .await; let repo = RepoFs::new( self.client.clone(), @@ -272,9 +352,16 @@ impl OrgFs { self.repo_inodes.insert(ino, idx); let attr = FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - self.icache.cache_attr(ino, attr); + self.icache.cache_attr(ino, attr).await; (ino, attr) } @@ -300,8 +387,8 @@ impl OrgFs { } /// Translate a repo inode to an org inode, allocating if needed. - /// Also mirrors the ICB into the org's `inode_table`. - fn translate_repo_ino_to_org( + /// Also mirrors the ICB into the org's inode table. + async fn translate_repo_ino_to_org( &mut self, slot_idx: usize, repo_ino: Inode, @@ -313,28 +400,31 @@ impl OrgFs { .bridge .backward_or_insert_inode(repo_ino, || self.icache.allocate_inode()); - // Ensure there's an ICB in the org table. - let icb = self.icache.entry_or_insert_icb(org_ino, || { - trace!( + self.icache + .entry_or_insert_icb( org_ino, - repo_ino, - parent = parent_org_ino, - ?name, - "translate: created new org ICB" - ); - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_org_ino), - children: None, - attr: None, - } - }); - - // Log reuse case. 
- if icb.rc > 0 || icb.attr.is_some() { - trace!(org_ino, repo_ino, "translate: reused existing org ICB"); - } + || { + trace!( + org_ino, + repo_ino, + parent = parent_org_ino, + ?name, + "translate: created new org ICB" + ); + InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent_org_ino), + attr: None, + } + }, + |icb| { + if icb.rc > 0 || icb.attr.is_some() { + trace!(org_ino, repo_ino, "translate: reused existing org ICB"); + } + }, + ) + .await; org_ino } @@ -351,12 +441,7 @@ impl Fs for OrgFs { #[instrument(skip(self), fields(org = %self.name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - debug_assert!( - self.icache.contains(parent), - "lookup: parent inode {parent} not in inode table" - ); - - match self.inode_role(parent) { + match self.inode_role(parent).await { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. let name_str = name.to_str().ok_or(LookupError::InodeNotFound)?; @@ -364,8 +449,8 @@ impl Fs for OrgFs { if self.is_github() { // name is an owner like "torvalds" — create lazily, no API validation. trace!(owner = name_str, "lookup: resolving github owner dir"); - let (ino, attr) = self.ensure_owner_inode(name_str); - self.icache.inc_rc(ino); + let (ino, attr) = self.ensure_owner_inode(name_str).await; + self.icache.inc_rc(ino).await; Ok(attr) } else { // Children of org root are repos. @@ -374,13 +459,15 @@ impl Fs for OrgFs { // Validate repo exists via API. let repo = self.wait_for_sync(name_str).await?; - let (ino, attr) = self.ensure_repo_inode( - name_str, - name_str, - &repo.default_branch, - Self::ROOT_INO, - ); - let rc = self.icache.inc_rc(ino); + let (ino, attr) = self + .ensure_repo_inode( + name_str, + name_str, + &repo.default_branch, + Self::ROOT_INO, + ) + .await; + let rc = self.icache.inc_rc(ino).await; trace!(ino, repo = name_str, rc, "lookup: resolved repo inode"); Ok(attr) } @@ -407,9 +494,10 @@ impl Fs for OrgFs { // Validate via API (uses encoded name). 
let repo = self.wait_for_sync(&encoded).await?; - let (ino, attr) = - self.ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent); - self.icache.inc_rc(ino); + let (ino, attr) = self + .ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent) + .await; + self.icache.inc_rc(ino).await; Ok(attr) } InodeRole::RepoOwned { idx } => { @@ -425,12 +513,13 @@ impl Fs for OrgFs { // Translate back to org namespace. let kind: DirEntryType = repo_attr.into(); - let org_ino = self.translate_repo_ino_to_org(idx, repo_ino, parent, name, kind); + let org_ino = + self.translate_repo_ino_to_org(idx, repo_ino, parent, name, kind).await; // Rebuild attr with org inode. let org_attr = self.repos[idx].bridge.attr_backward(repo_attr); - self.icache.cache_attr(org_ino, org_attr); - let rc = self.icache.inc_rc(org_ino); + self.icache.cache_attr(org_ino, org_attr).await; + let rc = self.icache.inc_rc(org_ino).await; trace!(org_ino, repo_ino, rc, "lookup: resolved content inode"); Ok(org_attr) } @@ -443,7 +532,7 @@ impl Fs for OrgFs { ino: Inode, _fh: Option, ) -> Result { - self.icache.get_attr(ino).ok_or_else(|| { + self.icache.get_attr(ino).await.ok_or_else(|| { warn!(ino, "getattr on unknown inode"); GetAttrError::InodeNotFound }) @@ -451,12 +540,7 @@ impl Fs for OrgFs { #[instrument(skip(self), fields(org = %self.name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - debug_assert!( - self.icache.contains(ino), - "readdir: inode {ino} not in inode table" - ); - - match self.inode_role(ino) { + match self.inode_role(ino).await { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. 
if self.is_github() { @@ -485,12 +569,14 @@ impl Fs for OrgFs { let mut entries = Vec::with_capacity(repo_infos.len()); for (repo_name, default_branch) in &repo_infos { - let (repo_ino, _) = self.ensure_repo_inode( - repo_name, - repo_name, - default_branch, - Self::ROOT_INO, - ); + let (repo_ino, _) = self + .ensure_repo_inode( + repo_name, + repo_name, + default_branch, + Self::ROOT_INO, + ) + .await; entries.push(DirEntry { ino: repo_ino, name: repo_name.clone().into(), @@ -498,19 +584,14 @@ impl Fs for OrgFs { }); } - let icb = self - .icache - .get_icb_mut(ino) - .ok_or(ReadDirError::InodeNotFound)?; - Ok(icb.children.insert(entries)) + self.readdir_buf = entries; + Ok(&self.readdir_buf) } InodeRole::OwnerDir if self.is_github() => { // TODO(MES-674): Cleanup "special" casing for github. - return Err(ReadDirError::NotPermitted); - } - InodeRole::OwnerDir => { - return Err(ReadDirError::NotADirectory); + Err(ReadDirError::NotPermitted) } + InodeRole::OwnerDir => Err(ReadDirError::NotADirectory), InodeRole::RepoOwned { idx } => { // Delegate to repo. let repo_ino = self.repos[idx] @@ -523,20 +604,22 @@ impl Fs for OrgFs { let mut org_entries = Vec::with_capacity(repo_entries.len()); for entry in &repo_entries { - let org_child_ino = self.translate_repo_ino_to_org( - idx, - entry.ino, - ino, - &entry.name, - entry.kind, - ); + let org_child_ino = self + .translate_repo_ino_to_org( + idx, + entry.ino, + ino, + &entry.name, + entry.kind, + ) + .await; // Cache attr from repo if available. 
if let Some(repo_icb_attr) = - self.repos[idx].repo.inode_table_get_attr(entry.ino) + self.repos[idx].repo.inode_table_get_attr(entry.ino).await { let org_attr = self.repos[idx].bridge.attr_backward(repo_icb_attr); - self.icache.cache_attr(org_child_ino, org_attr); + self.icache.cache_attr(org_child_ino, org_attr).await; } else { trace!( repo_ino = entry.ino, @@ -552,18 +635,15 @@ impl Fs for OrgFs { }); } - let icb = self - .icache - .get_icb_mut(ino) - .ok_or(ReadDirError::InodeNotFound)?; - Ok(icb.children.insert(org_entries)) + self.readdir_buf = org_entries; + Ok(&self.readdir_buf) } } } #[instrument(skip(self), fields(org = %self.name))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - let idx = self.repo_slot_for_inode(ino).ok_or_else(|| { + let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "open on inode not belonging to any repo"); OpenError::InodeNotFound })?; @@ -597,7 +677,7 @@ impl Fs for OrgFs { flags: OpenFlags, lock_owner: Option, ) -> Result { - let idx = self.repo_slot_for_inode(ino).ok_or_else(|| { + let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "read on inode not belonging to any repo"); ReadError::InodeNotFound })?; @@ -628,7 +708,7 @@ impl Fs for OrgFs { flags: OpenFlags, flush: bool, ) -> Result<(), ReleaseError> { - let idx = self.repo_slot_for_inode(ino).ok_or_else(|| { + let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "release on inode not belonging to any repo"); ReleaseError::FileNotOpen })?; @@ -656,13 +736,8 @@ impl Fs for OrgFs { #[instrument(skip(self), fields(org = %self.name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { - debug_assert!( - self.icache.contains(ino), - "forget: inode {ino} not in inode table" - ); - // Propagate forget to inner repo if applicable. 
- if let Some(idx) = self.repo_slot_for_inode(ino) { + if let Some(idx) = self.repo_slot_for_inode(ino).await { if let Some(&repo_ino) = self.repos[idx].bridge.inode_map_get_by_left(ino) { self.repos[idx].repo.forget(repo_ino, nlookups).await; } else { @@ -673,7 +748,7 @@ impl Fs for OrgFs { } } - if self.icache.forget(ino, nlookups).is_some() { + if self.icache.forget(ino, nlookups).await.is_some() { // Clean up repo_inodes and owner_inodes mappings. self.repo_inodes.remove(&ino); self.owner_inodes.remove(&ino); From a93a327566b571b1fd00c5a1f80be9c8dad56487 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:36:00 -0800 Subject: [PATCH 08/57] feat(mescloud): implement MesaResolver, update MesaFS to use async icache --- src/fs/mescloud/mod.rs | 223 ++++++++++++++++++++++++++--------------- 1 file changed, 143 insertions(+), 80 deletions(-) diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 64ea0ad..a2fc668 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -1,5 +1,7 @@ use std::collections::HashMap; use std::ffi::OsStr; +use std::future::Future; +use std::time::SystemTime; use bytes::Bytes; use mesa_dev::MesaClient; @@ -7,6 +9,7 @@ use secrecy::ExposeSecret as _; use tracing::{instrument, trace, warn}; use crate::fs::icache::bridge::HashMapBridge; +use crate::fs::icache::{AsyncICache, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -22,6 +25,7 @@ use common::InodeControlBlock; pub use common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError}; use icache::MescloudICache; +use icache as mescloud_icache; mod org; pub use org::OrgConfig; @@ -30,6 +34,55 @@ use org::OrgFs; pub mod icache; pub mod repo; +// --------------------------------------------------------------------------- +// MesaResolver +// --------------------------------------------------------------------------- + +struct 
MesaResolver { + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for MesaResolver { + type Icb = InodeControlBlock; + type Error = std::convert::Infallible; + + fn resolve( + &self, + ino: Inode, + stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized, + { + let fs_owner = self.fs_owner; + let block_size = self.block_size; + async move { + let stub = stub.unwrap_or_else(|| InodeControlBlock { + parent: None, + path: "/".into(), + rc: 0, + attr: None, + }); + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + ino, 0o755, now, now, fs_owner, block_size, + ), + }; + Ok(InodeControlBlock { + attr: Some(attr), + ..stub + }) + } + } +} + +// --------------------------------------------------------------------------- +// MesaFS +// --------------------------------------------------------------------------- + /// Per-org wrapper with inode and file handle translation. struct OrgSlot { org: OrgFs, @@ -49,7 +102,8 @@ enum InodeRole { /// Composes multiple [`OrgFs`] instances, each with its own inode namespace, /// using [`HashMapBridge`] for bidirectional inode/fh translation at each boundary. pub struct MesaFS { - icache: MescloudICache, + icache: MescloudICache, + readdir_buf: Vec, /// Maps mesa-level org-root inodes → index into `org_slots`. org_inodes: HashMap, @@ -62,8 +116,13 @@ impl MesaFS { /// Create a new `MesaFS` instance. pub fn new(orgs: impl Iterator, fs_owner: (u32, u32)) -> Self { + let resolver = MesaResolver { + fs_owner, + block_size: Self::BLOCK_SIZE, + }; Self { - icache: MescloudICache::new(Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE), + icache: MescloudICache::new(resolver, Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE), + readdir_buf: Vec::new(), org_inodes: HashMap::new(), org_slots: orgs .map(|org_conf| { @@ -82,7 +141,7 @@ impl MesaFS { } /// Classify an inode by its role. 
- fn inode_role(&self, ino: Inode) -> InodeRole { + async fn inode_role(&self, ino: Inode) -> InodeRole { if ino == Self::ROOT_NODE_INO { return InodeRole::Root; } @@ -90,7 +149,7 @@ impl MesaFS { return InodeRole::OrgOwned { idx }; } // Walk parent chain. - if let Some(idx) = self.org_slot_for_inode(ino) { + if let Some(idx) = self.org_slot_for_inode(ino).await { return InodeRole::OrgOwned { idx }; } debug_assert!(false, "inode {ino} not found in any org slot"); @@ -98,33 +157,40 @@ impl MesaFS { } /// Find the org slot index that owns `ino` by walking the parent chain. - fn org_slot_for_inode(&self, ino: Inode) -> Option { + async fn org_slot_for_inode(&self, ino: Inode) -> Option { if let Some(&idx) = self.org_inodes.get(&ino) { return Some(idx); } let mut current = ino; - while let Some(parent) = self.icache.get_icb(current).and_then(|icb| icb.parent) { + loop { + let parent = self + .icache + .get_icb(current, |icb| icb.parent) + .await + .flatten()?; if let Some(&idx) = self.org_inodes.get(&parent) { return Some(idx); } current = parent; } - None } /// Ensure a mesa-level inode exists for the org at `org_idx`. /// Seeds the bridge with (`mesa_org_ino`, `OrgFs::ROOT_INO`). /// Does NOT bump rc. - fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { + async fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { // Check if an inode already exists. 
if let Some((&existing_ino, _)) = self.org_inodes.iter().find(|&(_, &idx)| idx == org_idx) { - if let Some(icb) = self.icache.get_icb(existing_ino) - && let Some(attr) = icb.attr - { + if let Some(attr) = self.icache.get_attr(existing_ino).await { + let rc = self + .icache + .get_icb(existing_ino, |icb| icb.rc) + .await + .unwrap_or(0); trace!( ino = existing_ino, org_idx, - rc = icb.rc, + rc, "ensure_org_inode: reusing existing inode" ); return (existing_ino, attr); @@ -134,13 +200,18 @@ impl MesaFS { ino = existing_ino, org_idx, "ensure_org_inode: attr missing, rebuilding" ); - let now = std::time::SystemTime::now(); + let now = SystemTime::now(); let attr = FileAttr::Directory { - common: self - .icache - .make_common_file_attr(existing_ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + existing_ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - self.icache.cache_attr(existing_ino, attr); + self.icache.cache_attr(existing_ino, attr).await; return (existing_ino, attr); } @@ -149,17 +220,18 @@ impl MesaFS { let ino = self.icache.allocate_inode(); trace!(ino, org_idx, org = %org_name, "ensure_org_inode: allocated new inode"); - let now = std::time::SystemTime::now(); - self.icache.insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: org_name.as_str().into(), - parent: Some(Self::ROOT_NODE_INO), - children: None, - attr: None, - }, - ); + let now = SystemTime::now(); + self.icache + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: org_name.as_str().into(), + parent: Some(Self::ROOT_NODE_INO), + attr: None, + }, + ) + .await; self.org_inodes.insert(ino, org_idx); @@ -169,9 +241,16 @@ impl MesaFS { .insert_inode(ino, OrgFs::ROOT_INO); let attr = FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), }; - 
self.icache.cache_attr(ino, attr); + self.icache.cache_attr(ino, attr).await; (ino, attr) } @@ -184,7 +263,7 @@ impl MesaFS { /// Translate an org inode to a mesa inode, allocating if needed. /// Also mirrors the ICB into the mesa `inode_table`. - fn translate_org_ino_to_mesa( + async fn translate_org_ino_to_mesa( &mut self, slot_idx: usize, org_ino: Inode, @@ -196,13 +275,17 @@ impl MesaFS { .backward_or_insert_inode(org_ino, || self.icache.allocate_inode()); self.icache - .entry_or_insert_icb(mesa_ino, || InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_mesa_ino), - children: None, - attr: None, - }); + .entry_or_insert_icb( + mesa_ino, + || InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent_mesa_ino), + attr: None, + }, + |_| {}, + ) + .await; mesa_ino } @@ -219,12 +302,7 @@ impl Fs for MesaFS { #[instrument(skip(self))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - debug_assert!( - self.icache.contains(parent), - "lookup: parent inode {parent} not in inode table" - ); - - match self.inode_role(parent) { + match self.inode_role(parent).await { InodeRole::Root => { // Children of root are orgs. 
let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; @@ -235,8 +313,8 @@ impl Fs for MesaFS { .ok_or(LookupError::InodeNotFound)?; trace!(org = org_name, "lookup: matched org"); - let (ino, attr) = self.ensure_org_inode(org_idx); - let rc = self.icache.inc_rc(ino); + let (ino, attr) = self.ensure_org_inode(org_idx).await; + let rc = self.icache.inc_rc(ino).await; trace!(ino, org = org_name, rc, "lookup: resolved org inode"); Ok(attr) } @@ -249,11 +327,12 @@ impl Fs for MesaFS { let org_attr = self.org_slots[idx].org.lookup(org_parent, name).await?; let org_ino = org_attr.common().ino; - let mesa_ino = self.translate_org_ino_to_mesa(idx, org_ino, parent, name); + let mesa_ino = + self.translate_org_ino_to_mesa(idx, org_ino, parent, name).await; let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_attr); - self.icache.cache_attr(mesa_ino, mesa_attr); - let rc = self.icache.inc_rc(mesa_ino); + self.icache.cache_attr(mesa_ino, mesa_attr).await; + let rc = self.icache.inc_rc(mesa_ino).await; trace!(mesa_ino, org_ino, rc, "lookup: resolved via org delegation"); Ok(mesa_attr) } @@ -266,7 +345,7 @@ impl Fs for MesaFS { ino: Inode, _fh: Option, ) -> Result { - self.icache.get_attr(ino).ok_or_else(|| { + self.icache.get_attr(ino).await.ok_or_else(|| { warn!(ino, "getattr on unknown inode"); GetAttrError::InodeNotFound }) @@ -274,12 +353,7 @@ impl Fs for MesaFS { #[instrument(skip(self))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - debug_assert!( - self.icache.contains(ino), - "readdir: inode {ino} not in inode table" - ); - - match self.inode_role(ino) { + match self.inode_role(ino).await { InodeRole::Root => { let org_info: Vec<(usize, String)> = self .org_slots @@ -290,7 +364,7 @@ impl Fs for MesaFS { let mut entries = Vec::with_capacity(org_info.len()); for (org_idx, name) in &org_info { - let (org_ino, _) = self.ensure_org_inode(*org_idx); + let (org_ino, _) = self.ensure_org_inode(*org_idx).await; 
entries.push(DirEntry { ino: org_ino, name: name.clone().into(), @@ -300,11 +374,8 @@ impl Fs for MesaFS { trace!(entry_count = entries.len(), "readdir: listing orgs"); - let icb = self - .icache - .get_icb_mut(ino) - .ok_or(ReadDirError::InodeNotFound)?; - Ok(icb.children.insert(entries)) + self.readdir_buf = entries; + Ok(&self.readdir_buf) } InodeRole::OrgOwned { idx } => { let org_ino = self.org_slots[idx] @@ -317,14 +388,14 @@ impl Fs for MesaFS { let mut mesa_entries = Vec::with_capacity(org_entries.len()); for entry in &org_entries { let mesa_child_ino = - self.translate_org_ino_to_mesa(idx, entry.ino, ino, &entry.name); + self.translate_org_ino_to_mesa(idx, entry.ino, ino, &entry.name).await; // Cache attr from org if available. if let Some(org_icb_attr) = - self.org_slots[idx].org.inode_table_get_attr(entry.ino) + self.org_slots[idx].org.inode_table_get_attr(entry.ino).await { let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_icb_attr); - self.icache.cache_attr(mesa_child_ino, mesa_attr); + self.icache.cache_attr(mesa_child_ino, mesa_attr).await; } mesa_entries.push(DirEntry { @@ -334,18 +405,15 @@ impl Fs for MesaFS { }); } - let icb = self - .icache - .get_icb_mut(ino) - .ok_or(ReadDirError::InodeNotFound)?; - Ok(icb.children.insert(mesa_entries)) + self.readdir_buf = mesa_entries; + Ok(&self.readdir_buf) } } } #[instrument(skip(self))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - let idx = self.org_slot_for_inode(ino).ok_or_else(|| { + let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "open on inode not belonging to any org"); OpenError::InodeNotFound })?; @@ -379,7 +447,7 @@ impl Fs for MesaFS { flags: OpenFlags, lock_owner: Option, ) -> Result { - let idx = self.org_slot_for_inode(ino).ok_or_else(|| { + let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "read on inode not belonging to any org"); ReadError::InodeNotFound })?; @@ -406,7 +474,7 @@ impl Fs for MesaFS { flags: 
OpenFlags, flush: bool, ) -> Result<(), ReleaseError> { - let idx = self.org_slot_for_inode(ino).ok_or_else(|| { + let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { warn!(ino, "release on inode not belonging to any org"); ReleaseError::FileNotOpen })?; @@ -432,19 +500,14 @@ impl Fs for MesaFS { #[instrument(skip(self))] async fn forget(&mut self, ino: Inode, nlookups: u64) { - debug_assert!( - self.icache.contains(ino), - "forget: inode {ino} not in inode table" - ); - // Propagate forget to inner org if applicable. - if let Some(idx) = self.org_slot_for_inode(ino) + if let Some(idx) = self.org_slot_for_inode(ino).await && let Some(&org_ino) = self.org_slots[idx].bridge.inode_map_get_by_left(ino) { self.org_slots[idx].org.forget(org_ino, nlookups).await; } - if self.icache.forget(ino, nlookups).is_some() { + if self.icache.forget(ino, nlookups).await.is_some() { self.org_inodes.remove(&ino); for slot in &mut self.org_slots { slot.bridge.remove_inode_by_left(ino); From b29ea2eca8cb2448f4b55ee1d1a9a306db6d0806 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:42:20 -0800 Subject: [PATCH 09/57] CLAUDE.md --- CLAUDE.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..44e302e --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,15 @@ +# Code cleanliness + +- You are NOT ALLOWED to add useless code separators like this: + + ```rust + // --------------------------------------------------------------------------- + // Some section + // --------------------------------------------------------------------------- + ``` + + These are considered bad practice and indicate that the code is not + well-structured. Prefer using functions and modules to organize your code. + + If you feel the need to add such separators, it likely means that your code + is too long and should be refactored into smaller, more manageable pieces. 
From 883c14aa450e13b794d08c2eb4c8aebc9fef4b39 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:44:21 -0800 Subject: [PATCH 10/57] chore: clean up async icache migration Fix all clippy warnings and dead code warnings introduced during the async icache migration (Tasks 1-7). Zero warnings from cargo clippy. --- src/fs/icache/async_cache.rs | 7 +++---- src/fs/icache/cache.rs | 3 +++ src/fs/mescloud/icache.rs | 12 ++++++++---- src/fs/mescloud/repo.rs | 18 ++++++++++-------- 4 files changed, 24 insertions(+), 16 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 1feb516..04bea04 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -220,7 +220,7 @@ impl AsyncICache { let t = then_fn.take().unwrap_or_else(|| unreachable!()); Some(t(icb)) } - _ => None, + IcbState::InFlight(_) | IcbState::Available(_) => None, }) .await; if let Some(Some(r)) = hit { @@ -240,9 +240,8 @@ impl AsyncICache { // Stub needing resolution — extract stub, replace with InFlight let (tx, rx) = watch::channel(()); let old = std::mem::replace(occ.get_mut(), IcbState::InFlight(rx)); - let stub = match old { - IcbState::Available(icb) => icb, - _ => unreachable!(), + let IcbState::Available(stub) = old else { + unreachable!() }; drop(occ); // release shard lock before awaiting diff --git a/src/fs/icache/cache.rs b/src/fs/icache/cache.rs index 675a3ba..47c29aa 100644 --- a/src/fs/icache/cache.rs +++ b/src/fs/icache/cache.rs @@ -48,6 +48,7 @@ impl ICache { } /// Insert an ICB directly. + #[expect(dead_code, reason = "public API method for future use")] pub fn insert_icb(&mut self, ino: Inode, icb: I) { self.inode_table.insert(ino, icb); } @@ -64,6 +65,7 @@ impl ICache { } /// Increment rc. Panics (via unwrap) if inode doesn't exist. 
+ #[expect(dead_code, reason = "public API method for future use")] pub fn inc_rc(&mut self, ino: Inode) -> u64 { let icb = self .inode_table @@ -93,6 +95,7 @@ impl ICache { } } + #[expect(dead_code, reason = "public API method for future use")] pub fn iter(&self) -> impl Iterator { self.inode_table.iter() } diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 5c58c8f..c694ed3 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -45,7 +45,7 @@ pub fn blocks_of_size(block_size: u32, size: u64) -> u64 { size.div_ceil(u64::from(block_size)) } -/// Free function -- usable by both MescloudICache and resolvers. +/// Free function -- usable by both `MescloudICache` and resolvers. pub fn make_common_file_attr( ino: Inode, perm: u16, @@ -68,7 +68,7 @@ pub fn make_common_file_attr( } } -/// Mescloud-specific directory cache wrapper over AsyncICache. +/// Mescloud-specific directory cache wrapper over `AsyncICache`. pub struct MescloudICache> { inner: AsyncICache, inode_factory: InodeFactory, @@ -108,6 +108,7 @@ impl> MescloudICache { self.inner.get_icb(ino, f).await } + #[expect(dead_code, reason = "public API method for future use")] pub async fn get_icb_mut( &self, ino: Inode, @@ -117,7 +118,7 @@ impl> MescloudICache { } pub async fn insert_icb(&self, ino: Inode, icb: InodeControlBlock) { - self.inner.insert_icb(ino, icb).await + self.inner.insert_icb(ino, icb).await; } pub async fn entry_or_insert_icb( @@ -151,10 +152,12 @@ impl> MescloudICache { self.inner.allocate_fh() } + #[expect(dead_code, reason = "public API method for future use")] pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { - self.inner.for_each(f) + self.inner.for_each(f); } + #[expect(dead_code, reason = "public API method for future use")] pub fn inode_count(&self) -> usize { self.inner.inode_count() } @@ -235,6 +238,7 @@ impl> MescloudICache { } /// Direct access to the inner async cache for resolvers that need it. 
+ #[expect(dead_code, reason = "public API method for future use")] pub fn inner(&self) -> &AsyncICache { &self.inner } diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index ba94611..8c4bdcf 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -59,7 +59,7 @@ impl IcbResolver for RepoResolver { let block_size = self.block_size; async move { - let stub = stub.expect("RepoResolver requires a stub ICB"); + let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; @@ -73,10 +73,6 @@ impl IcbResolver for RepoResolver { .map_err(MesaApiError::from)?; let now = SystemTime::now(); - #[expect( - clippy::match_same_arms, - reason = "symlink arm will diverge once readlink is wired up" - )] let attr = match &content { Content::File(f) => { let size = f.size.to_u64().unwrap_or(0); @@ -250,8 +246,9 @@ impl Fs for RepoFs { let ino = self.icache.ensure_child_ino(parent, name).await; let attr = self .icache - .get_or_resolve(ino, |icb| icb.attr.expect("resolver should populate attr")) - .await?; + .get_or_resolve(ino, |icb| icb.attr) + .await? 
+ .ok_or(LookupError::InodeNotFound)?; self.icache.cache_attr(ino, attr).await; let rc = self.icache.inc_rc(ino).await; @@ -333,7 +330,12 @@ impl Fs for RepoFs { self.icache.block_size(), ), }, - _ => FileAttr::RegularFile { + DirEntryType::RegularFile + | DirEntryType::Symlink + | DirEntryType::CharDevice + | DirEntryType::BlockDevice + | DirEntryType::NamedPipe + | DirEntryType::Socket => FileAttr::RegularFile { common: mescloud_icache::make_common_file_attr( child_ino, 0o644, From 3aead5cde4f7609b05a043dda554ae9c2a48097d Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:47:39 -0800 Subject: [PATCH 11/57] better claude --- CLAUDE.md | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/CLAUDE.md b/CLAUDE.md index 44e302e..9ba3f68 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,5 +1,19 @@ -# Code cleanliness +## Verify Changes +After making code changes, always run this sequence: + +``` +cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet +``` + +## Code Style & Conventions + +- Use `thiserror` for errors. +- Prefer `Result` over `.unwrap()` / `.expect()` - handle errors explicitly +- Use `impl Trait` in argument position for simple generic bounds +- Prefer iterators and combinators over manual loops where readable +- Destructure structs at use sites when accessing multiple fields +- Use `#[must_use]` on functions whose return values should not be ignored - You are NOT ALLOWED to add useless code separators like this: ```rust @@ -13,3 +27,25 @@ If you feel the need to add such separators, it likely means that your code is too long and should be refactored into smaller, more manageable pieces. 
+ +## Module Organization + +- One public type per file when the type is complex +- Re-export public API from `lib.rs` / `mod.rs` +- Keep `mod` declarations in parent, not via `mod.rs` in subdirectories (2018 edition style) +- Group imports: std → external crates → crate-internal (`use crate::...`) + +## Async / Concurrency + +- Runtime: tokio (multi-threaded) +- Prefer `tokio::spawn` for concurrent tasks; use `JoinSet` for structured concurrency +- Use `tokio::select!` for racing futures; always include cancellation safety notes +- Channels: `tokio::sync::mpsc` for multi-producer, `tokio::sync::oneshot` for request-response +- Never block the async runtime — offload blocking work with `tokio::task::spawn_blocking` + +## Dependencies + +- Check for existing deps with `cargo tree` before adding new crates +- Pin major versions in `Cargo.toml` (e.g., `serde = "1"`) +- Minimize feature flags — only enable what you use +- Audit new deps: check download counts, maintenance status, and `cargo audit` From 7dca312418d9ffb4b71efffb6eb6d9d3bfa830cc Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:53:04 -0800 Subject: [PATCH 12/57] deslop --- src/fs/mescloud/repo.rs | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 8c4bdcf..56381f8 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -25,10 +25,6 @@ pub use super::common::{ use super::icache as mescloud_icache; use super::icache::{InodeControlBlock, MescloudICache}; -// --------------------------------------------------------------------------- -// RepoResolver -// --------------------------------------------------------------------------- - pub(super) struct RepoResolver { client: MesaClient, org_name: String, @@ -60,8 +56,7 @@ impl IcbResolver for RepoResolver { async move { let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); - let file_path = - 
build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; + let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; let content = client .org(&org_name) @@ -111,10 +106,6 @@ impl IcbResolver for RepoResolver { } } -// --------------------------------------------------------------------------- -// build_repo_path helper -// --------------------------------------------------------------------------- - /// Walk the parent chain in the cache to build the repo-relative path. /// Returns `None` for the root inode (maps to `path=None` in the mesa content API). async fn build_repo_path( @@ -142,10 +133,6 @@ async fn build_repo_path( joined.to_str().map(String::from) } -// --------------------------------------------------------------------------- -// RepoFs -// --------------------------------------------------------------------------- - /// A filesystem rooted at a single mesa repository. /// /// Implements [`Fs`] for navigating files and directories within one repo. From d259805fb98da3464f7f2e20382a0dae33f70069 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 11:57:54 -0800 Subject: [PATCH 13/57] remove dead code --- src/fs/icache/cache.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/src/fs/icache/cache.rs b/src/fs/icache/cache.rs index 47c29aa..0beeadf 100644 --- a/src/fs/icache/cache.rs +++ b/src/fs/icache/cache.rs @@ -47,12 +47,6 @@ impl ICache { self.inode_table.contains_key(&ino) } - /// Insert an ICB directly. - #[expect(dead_code, reason = "public API method for future use")] - pub fn insert_icb(&mut self, ino: Inode, icb: I) { - self.inode_table.insert(ino, icb); - } - /// Insert an ICB only if absent. /// Returns a mutable reference to the (possibly pre-existing) ICB. pub fn entry_or_insert_icb(&mut self, ino: Inode, f: impl FnOnce() -> I) -> &mut I { @@ -64,17 +58,6 @@ impl ICache { self.inode_table.len() } - /// Increment rc. 
Panics (via unwrap) if inode doesn't exist. - #[expect(dead_code, reason = "public API method for future use")] - pub fn inc_rc(&mut self, ino: Inode) -> u64 { - let icb = self - .inode_table - .get_mut(&ino) - .unwrap_or_else(|| unreachable!("inc_rc: inode {ino} not in table")); - *icb.rc_mut() += 1; - icb.rc() - } - /// Decrement rc by `nlookups`. Returns `Some(evicted_icb)` if the inode was evicted. pub fn forget(&mut self, ino: Inode, nlookups: u64) -> Option { match self.inode_table.entry(ino) { @@ -94,9 +77,4 @@ impl ICache { } } } - - #[expect(dead_code, reason = "public API method for future use")] - pub fn iter(&self) -> impl Iterator { - self.inode_table.iter() - } } From 598ac67ca6909a84741428279b1b8c51e25b57f0 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 12:21:01 -0800 Subject: [PATCH 14/57] feat: add FileTable type for file handle allocation --- src/fs/icache/file_table.rs | 37 +++++++++++++++++++++++++++++++++++++ src/fs/icache/mod.rs | 3 +++ 2 files changed, 40 insertions(+) create mode 100644 src/fs/icache/file_table.rs diff --git a/src/fs/icache/file_table.rs b/src/fs/icache/file_table.rs new file mode 100644 index 0000000..74b14a3 --- /dev/null +++ b/src/fs/icache/file_table.rs @@ -0,0 +1,37 @@ +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::fs::r#trait::FileHandle; + +/// Monotonically increasing file handle allocator. 
+#[allow(clippy::allow_attributes)] +#[allow(dead_code)] +pub struct FileTable { + next_fh: AtomicU64, +} + +#[allow(clippy::allow_attributes)] +#[allow(dead_code)] +impl FileTable { + pub fn new() -> Self { + Self { + next_fh: AtomicU64::new(1), + } + } + + pub fn allocate(&self) -> FileHandle { + self.next_fh.fetch_add(1, Ordering::Relaxed) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn allocate_returns_monotonic_handles() { + let ft = FileTable::new(); + assert_eq!(ft.allocate(), 1); + assert_eq!(ft.allocate(), 2); + assert_eq!(ft.allocate(), 3); + } +} diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index c596f85..35a930c 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -3,11 +3,14 @@ pub mod async_cache; pub mod bridge; mod cache; +mod file_table; mod inode_factory; pub use async_cache::AsyncICache; pub use async_cache::IcbResolver; pub use cache::ICache; +#[expect(unused_imports)] +pub use file_table::FileTable; pub use inode_factory::InodeFactory; /// Common interface for inode control block types usable with `ICache`. From 80688b90e6c3fcf72122fd62cd2facf03b4778ec Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 12:25:51 -0800 Subject: [PATCH 15/57] refactor: move file handle allocation from icaches to FileTable on each filesystem --- src/fs/icache/async_cache.rs | 26 ++++++++--------------- src/fs/icache/cache.rs | 18 +++------------- src/fs/icache/file_table.rs | 4 ---- src/fs/icache/mod.rs | 1 - src/fs/local.rs | 6 ++++-- src/fs/mescloud/icache.rs | 14 ++++++------- src/fs/mescloud/mod.rs | 28 ++++++++++++++----------- src/fs/mescloud/org.rs | 40 ++++++++++-------------------------- src/fs/mescloud/repo.rs | 6 ++++-- 9 files changed, 52 insertions(+), 91 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 04bea04..f16ef8f 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -1,20 +1,24 @@ //! 
Async inode cache with InFlight/Available state machine. use std::future::Future; -use std::sync::atomic::{AtomicU64, Ordering}; use scc::HashMap as ConcurrentHashMap; use tokio::sync::watch; use tracing::{trace, warn}; -use crate::fs::r#trait::{FileHandle, Inode}; +use crate::fs::r#trait::Inode; use super::IcbLike; /// State of an entry in the async inode cache. pub enum IcbState { /// Entry is being loaded; waiters clone the receiver and `.changed().await`. + /// + /// The channel carries `()` rather than the resolved value because the map + /// is the single source of truth: ICBs are mutated in-place (rc, attrs) so + /// a snapshot in the channel would immediately go stale. Sender-drop also + /// gives us implicit, leak-proof signalling on both success and error paths. InFlight(watch::Receiver<()>), /// Entry is ready for use. Available(I), @@ -49,11 +53,10 @@ pub trait IcbResolver: Send + Sync { /// Async, concurrency-safe inode cache. /// /// All methods take `&self` — internal synchronization is provided by -/// `scc::HashMap` (sharded lock-free map) and `AtomicU64`. +/// `scc::HashMap` (sharded lock-free map). pub struct AsyncICache { resolver: R, inode_table: ConcurrentHashMap>, - next_fh: AtomicU64, } impl AsyncICache { @@ -68,15 +71,9 @@ impl AsyncICache { Self { resolver, inode_table: table, - next_fh: AtomicU64::new(1), } } - /// Allocate a monotonically increasing file handle. - pub fn allocate_fh(&self) -> FileHandle { - self.next_fh.fetch_add(1, Ordering::Relaxed) - } - /// Number of entries (`InFlight` + `Available`) in the table. 
pub fn inode_count(&self) -> usize { self.inode_table.len() @@ -370,6 +367,7 @@ mod tests { use super::*; use std::collections::HashMap as StdHashMap; use std::path::PathBuf; + use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; #[derive(Debug, Clone, PartialEq)] @@ -754,14 +752,6 @@ mod tests { assert_eq!(count, 1, "only root, not the InFlight entry"); } - #[tokio::test] - async fn allocate_fh_increments() { - let cache = test_cache(); - assert_eq!(cache.allocate_fh(), 1, "first fh should be 1"); - assert_eq!(cache.allocate_fh(), 2, "second fh should be 2"); - assert_eq!(cache.allocate_fh(), 3, "third fh should be 3"); - } - #[tokio::test] async fn wait_does_not_miss_signal_on_immediate_complete() { let cache = Arc::new(test_cache()); diff --git a/src/fs/icache/cache.rs b/src/fs/icache/cache.rs index 0beeadf..6a6a416 100644 --- a/src/fs/icache/cache.rs +++ b/src/fs/icache/cache.rs @@ -4,17 +4,15 @@ use std::collections::HashMap; use tracing::{trace, warn}; -use crate::fs::r#trait::{FileHandle, Inode}; +use crate::fs::r#trait::Inode; use super::IcbLike; /// Generic directory cache. /// -/// Owns an inode table and a file handle counter. Provides reference counting, -/// ICB lookup/insertion, and file handle allocation. +/// Owns an inode table. Provides reference counting and ICB lookup/insertion. pub struct ICache { inode_table: HashMap, - next_fh: FileHandle, } impl ICache { @@ -22,17 +20,7 @@ impl ICache { pub fn new(root_ino: Inode, root_path: impl Into) -> Self { let mut inode_table = HashMap::new(); inode_table.insert(root_ino, I::new_root(root_path.into())); - Self { - inode_table, - next_fh: 1, - } - } - - /// Allocate a file handle (increments `next_fh` and returns the old value). 
- pub fn allocate_fh(&mut self) -> FileHandle { - let fh = self.next_fh; - self.next_fh += 1; - fh + Self { inode_table } } pub fn get_icb(&self, ino: Inode) -> Option<&I> { diff --git a/src/fs/icache/file_table.rs b/src/fs/icache/file_table.rs index 74b14a3..2ed810d 100644 --- a/src/fs/icache/file_table.rs +++ b/src/fs/icache/file_table.rs @@ -3,14 +3,10 @@ use std::sync::atomic::{AtomicU64, Ordering}; use crate::fs::r#trait::FileHandle; /// Monotonically increasing file handle allocator. -#[allow(clippy::allow_attributes)] -#[allow(dead_code)] pub struct FileTable { next_fh: AtomicU64, } -#[allow(clippy::allow_attributes)] -#[allow(dead_code)] impl FileTable { pub fn new() -> Self { Self { diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index 35a930c..41d94ae 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -9,7 +9,6 @@ mod inode_factory; pub use async_cache::AsyncICache; pub use async_cache::IcbResolver; pub use cache::ICache; -#[expect(unused_imports)] pub use file_table::FileTable; pub use inode_factory::InodeFactory; diff --git a/src/fs/local.rs b/src/fs/local.rs index b568efd..78ff12b 100644 --- a/src/fs/local.rs +++ b/src/fs/local.rs @@ -8,7 +8,7 @@ use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _}; use std::ffi::OsStr; use tracing::warn; -use crate::fs::icache::{ICache, IcbLike}; +use crate::fs::icache::{FileTable, ICache, IcbLike}; use crate::fs::r#trait::{ DirEntry, FileAttr, FileHandle, FileOpenOptions, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -163,6 +163,7 @@ impl IcbLike for InodeControlBlock { pub struct LocalFs { icache: ICache, + file_table: FileTable, open_files: HashMap, } @@ -171,6 +172,7 @@ impl LocalFs { pub fn new(abs_path: impl Into) -> Self { Self { icache: ICache::new(1, abs_path), + file_table: FileTable::new(), open_files: HashMap::new(), } } @@ -346,7 +348,7 @@ impl Fs for LocalFs { .map_err(OpenError::Io)?; // Generate a new file handle. 
- let fh = self.icache.allocate_fh(); + let fh = self.file_table.allocate(); self.open_files.insert(fh, file); Ok(OpenFile { diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index c694ed3..5e84870 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -4,9 +4,7 @@ use std::ffi::OsStr; use std::time::SystemTime; use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver, InodeFactory}; -use crate::fs::r#trait::{ - CommonFileAttr, FileAttr, FileHandle, FilesystemStats, Inode, Permissions, -}; +use crate::fs::r#trait::{CommonFileAttr, FileAttr, FilesystemStats, Inode, Permissions}; /// Inode control block for mescloud filesystem layers. pub struct InodeControlBlock { @@ -104,7 +102,11 @@ impl> MescloudICache { self.inner.contains(ino).await } - pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&InodeControlBlock) -> T) -> Option { + pub async fn get_icb( + &self, + ino: Inode, + f: impl FnOnce(&InodeControlBlock) -> T, + ) -> Option { self.inner.get_icb(ino, f).await } @@ -148,10 +150,6 @@ impl> MescloudICache { // -- Delegated (sync) -- - pub fn allocate_fh(&self) -> FileHandle { - self.inner.allocate_fh() - } - #[expect(dead_code, reason = "public API method for future use")] pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { self.inner.for_each(f); diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index a2fc668..e98872c 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -9,7 +9,7 @@ use secrecy::ExposeSecret as _; use tracing::{instrument, trace, warn}; use crate::fs::icache::bridge::HashMapBridge; -use crate::fs::icache::{AsyncICache, IcbResolver}; +use crate::fs::icache::{AsyncICache, FileTable, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -24,8 +24,8 @@ mod common; use common::InodeControlBlock; pub use common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, 
ReleaseError}; -use icache::MescloudICache; use icache as mescloud_icache; +use icache::MescloudICache; mod org; pub use org::OrgConfig; @@ -103,6 +103,7 @@ enum InodeRole { /// using [`HashMapBridge`] for bidirectional inode/fh translation at each boundary. pub struct MesaFS { icache: MescloudICache, + file_table: FileTable, readdir_buf: Vec, /// Maps mesa-level org-root inodes → index into `org_slots`. @@ -122,6 +123,7 @@ impl MesaFS { }; Self { icache: MescloudICache::new(resolver, Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE), + file_table: FileTable::new(), readdir_buf: Vec::new(), org_inodes: HashMap::new(), org_slots: orgs @@ -189,9 +191,7 @@ impl MesaFS { .unwrap_or(0); trace!( ino = existing_ino, - org_idx, - rc, - "ensure_org_inode: reusing existing inode" + org_idx, rc, "ensure_org_inode: reusing existing inode" ); return (existing_ino, attr); } @@ -256,7 +256,7 @@ impl MesaFS { /// Allocate a mesa-level file handle and map it through the bridge. fn alloc_fh(&mut self, slot_idx: usize, org_fh: FileHandle) -> FileHandle { - let fh = self.icache.allocate_fh(); + let fh = self.file_table.allocate(); self.org_slots[slot_idx].bridge.insert_fh(fh, org_fh); fh } @@ -327,8 +327,9 @@ impl Fs for MesaFS { let org_attr = self.org_slots[idx].org.lookup(org_parent, name).await?; let org_ino = org_attr.common().ino; - let mesa_ino = - self.translate_org_ino_to_mesa(idx, org_ino, parent, name).await; + let mesa_ino = self + .translate_org_ino_to_mesa(idx, org_ino, parent, name) + .await; let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_attr); self.icache.cache_attr(mesa_ino, mesa_attr).await; @@ -387,12 +388,15 @@ impl Fs for MesaFS { let mut mesa_entries = Vec::with_capacity(org_entries.len()); for entry in &org_entries { - let mesa_child_ino = - self.translate_org_ino_to_mesa(idx, entry.ino, ino, &entry.name).await; + let mesa_child_ino = self + .translate_org_ino_to_mesa(idx, entry.ino, ino, &entry.name) + .await; // Cache attr from org if available. 
- if let Some(org_icb_attr) = - self.org_slots[idx].org.inode_table_get_attr(entry.ino).await + if let Some(org_icb_attr) = self.org_slots[idx] + .org + .inode_table_get_attr(entry.ino) + .await { let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_icb_attr); self.icache.cache_attr(mesa_child_ino, mesa_attr).await; diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index a404393..198f551 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -17,7 +17,7 @@ use super::icache as mescloud_icache; use super::icache::MescloudICache; use super::repo::RepoFs; use crate::fs::icache::bridge::HashMapBridge; -use crate::fs::icache::{AsyncICache, IcbResolver}; +use crate::fs::icache::{AsyncICache, FileTable, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -103,6 +103,7 @@ pub struct OrgFs { client: MesaClient, icache: MescloudICache, + file_table: FileTable, readdir_buf: Vec, /// Maps org-level repo-root inodes → index into `repos`. @@ -215,6 +216,7 @@ impl OrgFs { name, client, icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + file_table: FileTable::new(), readdir_buf: Vec::new(), repo_inodes: HashMap::new(), owner_inodes: HashMap::new(), @@ -285,12 +287,7 @@ impl OrgFs { if self.repos[idx].repo.repo_name() == repo_name { if let Some(attr) = self.icache.get_attr(ino).await { let rc = self.icache.get_icb(ino, |icb| icb.rc).await.unwrap_or(0); - trace!( - ino, - repo = repo_name, - rc, - "ensure_repo_inode: reusing" - ); + trace!(ino, repo = repo_name, rc, "ensure_repo_inode: reusing"); return (ino, attr); } // Attr missing — rebuild. @@ -381,7 +378,7 @@ impl OrgFs { /// Allocate an org-level file handle and map it through the bridge. 
fn alloc_fh(&mut self, slot_idx: usize, repo_fh: FileHandle) -> FileHandle { - let fh = self.icache.allocate_fh(); + let fh = self.file_table.allocate(); self.repos[slot_idx].bridge.insert_fh(fh, repo_fh); fh } @@ -460,12 +457,7 @@ impl Fs for OrgFs { let repo = self.wait_for_sync(name_str).await?; let (ino, attr) = self - .ensure_repo_inode( - name_str, - name_str, - &repo.default_branch, - Self::ROOT_INO, - ) + .ensure_repo_inode(name_str, name_str, &repo.default_branch, Self::ROOT_INO) .await; let rc = self.icache.inc_rc(ino).await; trace!(ino, repo = name_str, rc, "lookup: resolved repo inode"); @@ -513,8 +505,9 @@ impl Fs for OrgFs { // Translate back to org namespace. let kind: DirEntryType = repo_attr.into(); - let org_ino = - self.translate_repo_ino_to_org(idx, repo_ino, parent, name, kind).await; + let org_ino = self + .translate_repo_ino_to_org(idx, repo_ino, parent, name, kind) + .await; // Rebuild attr with org inode. let org_attr = self.repos[idx].bridge.attr_backward(repo_attr); @@ -570,12 +563,7 @@ impl Fs for OrgFs { let mut entries = Vec::with_capacity(repo_infos.len()); for (repo_name, default_branch) in &repo_infos { let (repo_ino, _) = self - .ensure_repo_inode( - repo_name, - repo_name, - default_branch, - Self::ROOT_INO, - ) + .ensure_repo_inode(repo_name, repo_name, default_branch, Self::ROOT_INO) .await; entries.push(DirEntry { ino: repo_ino, @@ -605,13 +593,7 @@ impl Fs for OrgFs { let mut org_entries = Vec::with_capacity(repo_entries.len()); for entry in &repo_entries { let org_child_ino = self - .translate_repo_ino_to_org( - idx, - entry.ino, - ino, - &entry.name, - entry.kind, - ) + .translate_repo_ino_to_org(idx, entry.ino, ino, &entry.name, entry.kind) .await; // Cache attr from repo if available. 
diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 56381f8..3382e13 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -12,7 +12,7 @@ use mesa_dev::low_level::content::{Content, DirEntry as MesaDirEntry}; use num_traits::cast::ToPrimitive as _; use tracing::{instrument, trace, warn}; -use crate::fs::icache::{AsyncICache, IcbResolver}; +use crate::fs::icache::{AsyncICache, FileTable, IcbResolver}; use crate::fs::r#trait::{ DirEntry, DirEntryType, FileAttr, FileHandle, FileOpenOptions, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, @@ -144,6 +144,7 @@ pub struct RepoFs { ref_: String, icache: MescloudICache, + file_table: FileTable, readdir_buf: Vec, open_files: HashMap, } @@ -174,6 +175,7 @@ impl RepoFs { repo_name, ref_, icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + file_table: FileTable::new(), readdir_buf: Vec::new(), open_files: HashMap::new(), } @@ -360,7 +362,7 @@ impl Fs for RepoFs { ), "open: inode {ino} has non-file cached attr" ); - let fh = self.icache.allocate_fh(); + let fh = self.file_table.allocate(); self.open_files.insert(fh, ino); trace!(ino, fh, "assigned file handle"); Ok(OpenFile { From 4f7420061b7b0a6f4fda2b2ca1bb93b08b860d53 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 12:26:24 -0800 Subject: [PATCH 16/57] chore: update cache.rs module doc to reflect removed file handle allocation --- src/fs/icache/cache.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fs/icache/cache.rs b/src/fs/icache/cache.rs index 6a6a416..a3818f9 100644 --- a/src/fs/icache/cache.rs +++ b/src/fs/icache/cache.rs @@ -1,4 +1,4 @@ -//! Generic inode table with reference counting and file handle allocation. +//! Generic inode table with reference counting. 
use std::collections::HashMap; From 7ab6c7829c6ae7473a515ebd62be6bc63a907c34 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 12:29:13 -0800 Subject: [PATCH 17/57] chore: add #[must_use] to FileTable per project conventions --- src/fs/icache/file_table.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/fs/icache/file_table.rs b/src/fs/icache/file_table.rs index 2ed810d..4596f93 100644 --- a/src/fs/icache/file_table.rs +++ b/src/fs/icache/file_table.rs @@ -3,6 +3,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; use crate::fs::r#trait::FileHandle; /// Monotonically increasing file handle allocator. +#[must_use] pub struct FileTable { next_fh: AtomicU64, } @@ -14,6 +15,7 @@ impl FileTable { } } + #[must_use] pub fn allocate(&self) -> FileHandle { self.next_fh.fetch_add(1, Ordering::Relaxed) } From 849f789d41adadfbe5ea8adcaf3755450bf1e07a Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:10:24 -0800 Subject: [PATCH 18/57] feat: add children field to InodeControlBlock for directory caching Also includes prerequisite: make AsyncICache::contains synchronous and add contains_resolved method. --- src/fs/icache/async_cache.rs | 74 +++++++++++++++++------ src/fs/icache/inode_factory.rs | 2 +- src/fs/mescloud/icache.rs | 107 +++++++++++++++++++++++++++++++-- src/fs/mescloud/mod.rs | 4 ++ src/fs/mescloud/org.rs | 5 ++ src/fs/mescloud/repo.rs | 9 +-- 6 files changed, 174 insertions(+), 27 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index f16ef8f..c5606fa 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -108,9 +108,22 @@ impl AsyncICache { } } - /// Check whether `ino` exists. **Awaits** if the entry is `InFlight`. - pub async fn contains(&self, ino: Inode) -> bool { - self.wait_for_available(ino).await + /// Check whether `ino` has an entry in the table (either `InFlight` or `Available`). + /// + /// This is a non-blocking, synchronous check. 
It does **not** wait for + /// `InFlight` entries to resolve. + pub fn contains(&self, ino: Inode) -> bool { + self.inode_table.contains_sync(&ino) + } + + /// Check whether `ino` is fully resolved (`Available`). + /// + /// Returns `false` if the entry is missing **or** still `InFlight`. + /// This is a non-blocking, synchronous check. + pub fn contains_resolved(&self, ino: Inode) -> bool { + self.inode_table + .read_sync(&ino, |_, s| matches!(s, IcbState::Available(_))) + .unwrap_or(false) } /// Read an ICB via closure. **Awaits** if `InFlight`. @@ -453,13 +466,43 @@ mod tests { #[tokio::test] async fn contains_returns_true_for_root() { let cache = test_cache(); - assert!(cache.contains(1).await, "root should exist"); + assert!(cache.contains(1), "root should exist"); } #[tokio::test] async fn contains_returns_false_for_missing() { let cache = test_cache(); - assert!(!cache.contains(999).await, "missing inode should not exist"); + assert!(!cache.contains(999), "missing inode should not exist"); + } + + #[tokio::test] + async fn contains_resolved_returns_true_for_root() { + let cache = test_cache(); + assert!(cache.contains_resolved(1), "root should be resolved"); + } + + #[tokio::test] + async fn contains_resolved_returns_false_for_missing() { + let cache = test_cache(); + assert!( + !cache.contains_resolved(999), + "missing inode should not be resolved" + ); + } + + #[tokio::test] + async fn contains_resolved_returns_false_for_inflight() { + let cache = test_cache(); + let (_tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + assert!(cache.contains(42), "InFlight entry should exist"); + assert!( + !cache.contains_resolved(42), + "InFlight entry should not be resolved" + ); } #[tokio::test] @@ -483,7 +526,7 @@ mod tests { .await .expect("task panicked") .expect("resolve failed"); - assert!(cache.contains(42).await, "should be true after resolve"); + assert!(cache.contains(42), "should be true after 
resolve"); } #[tokio::test] @@ -554,7 +597,7 @@ mod tests { }, ) .await; - assert!(cache.contains(42).await, "inserted entry should exist"); + assert!(cache.contains(42), "inserted entry should exist"); assert_eq!(cache.inode_count(), 2, "root + inserted = 2"); } @@ -690,7 +733,7 @@ mod tests { let evicted = cache.forget(42, 3).await; assert!(evicted.is_some(), "rc 3 - 3 = 0, should evict"); - assert!(!cache.contains(42).await, "evicted entry should be gone"); + assert!(!cache.contains(42), "evicted entry should be gone"); assert_eq!(cache.inode_count(), 1, "only root remains"); } @@ -776,13 +819,10 @@ mod tests { .await; drop(tx); - // This must NOT hang - let result = - tokio::time::timeout(std::time::Duration::from_millis(100), cache.contains(42)).await; - assert_eq!( - result, - Ok(true), - "should not hang on already-completed entry" + assert!(cache.contains(42), "entry should exist in table"); + assert!( + cache.contains_resolved(42), + "should be resolved after insert_icb overwrote InFlight" ); } @@ -822,7 +862,7 @@ mod tests { let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; assert_eq!(path, Ok(PathBuf::from("/resolved"))); // Should now be cached - assert!(cache.contains(42).await); + assert!(cache.contains(42)); } #[tokio::test] @@ -835,7 +875,7 @@ mod tests { cache.get_or_resolve(42, |icb| icb.path.clone()).await; assert_eq!(result, Err("network error".to_owned())); // Entry should be cleaned up on error - assert!(!cache.contains(42).await); + assert!(!cache.contains(42)); } struct CountingResolver { diff --git a/src/fs/icache/inode_factory.rs b/src/fs/icache/inode_factory.rs index 9dcbe65..1a60338 100644 --- a/src/fs/icache/inode_factory.rs +++ b/src/fs/icache/inode_factory.rs @@ -1,5 +1,5 @@ -use std::sync::atomic::{AtomicU64, Ordering}; use crate::fs::r#trait::Inode; +use std::sync::atomic::{AtomicU64, Ordering}; /// Monotonically increasing inode allocator. 
pub struct InodeFactory { diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 5e84870..e2231a0 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -4,7 +4,9 @@ use std::ffi::OsStr; use std::time::SystemTime; use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver, InodeFactory}; -use crate::fs::r#trait::{CommonFileAttr, FileAttr, FilesystemStats, Inode, Permissions}; +use crate::fs::r#trait::{ + CommonFileAttr, DirEntryType, FileAttr, FilesystemStats, Inode, Permissions, +}; /// Inode control block for mescloud filesystem layers. pub struct InodeControlBlock { @@ -13,6 +15,8 @@ pub struct InodeControlBlock { pub path: std::path::PathBuf, /// Cached file attributes from the last lookup. pub attr: Option, + /// Cached directory children from the resolver (directories only). + pub children: Option>, } impl IcbLike for InodeControlBlock { @@ -22,6 +26,7 @@ impl IcbLike for InodeControlBlock { parent: None, path, attr: None, + children: None, } } @@ -34,7 +39,11 @@ impl IcbLike for InodeControlBlock { } fn needs_resolve(&self) -> bool { - self.attr.is_none() + match self.attr { + None => true, + Some(FileAttr::Directory { .. }) => self.children.is_none(), + Some(_) => false, + } } } @@ -98,8 +107,13 @@ impl> MescloudICache { // -- Delegated from AsyncICache (async) -- - pub async fn contains(&self, ino: Inode) -> bool { - self.inner.contains(ino).await + pub fn contains(&self, ino: Inode) -> bool { + self.inner.contains(ino) + } + + #[expect(dead_code, reason = "public API method for future use")] + pub fn contains_resolved(&self, ino: Inode) -> bool { + self.inner.contains_resolved(ino) } pub async fn get_icb( @@ -204,7 +218,7 @@ impl> MescloudICache { } /// Find an existing child by (parent, name) or allocate a new inode. - /// If new, inserts a stub ICB (parent+path set, attr=None, rc=0). + /// If new, inserts a stub ICB (parent+path set, attr=None, children=None, rc=0). /// Does NOT bump rc. Returns the inode number. 
pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { // Search for existing child by parent + name @@ -229,6 +243,7 @@ impl> MescloudICache { path: name.into(), parent: Some(parent), attr: None, + children: None, }, ) .await; @@ -241,3 +256,85 @@ impl> MescloudICache { &self.inner } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::fs::r#trait::DirEntryType; + + fn dummy_dir_attr(ino: Inode) -> FileAttr { + let now = SystemTime::now(); + FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, (0, 0), 4096), + } + } + + fn dummy_file_attr(ino: Inode) -> FileAttr { + let now = SystemTime::now(); + FileAttr::RegularFile { + common: make_common_file_attr(ino, 0o644, now, now, (0, 0), 4096), + size: 100, + blocks: 1, + } + } + + #[test] + fn needs_resolve_stub_returns_true() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 0, + path: "stub".into(), + attr: None, + children: None, + }; + assert!(icb.needs_resolve()); + } + + #[test] + fn needs_resolve_file_with_attr_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "file.txt".into(), + attr: Some(dummy_file_attr(2)), + children: None, + }; + assert!(!icb.needs_resolve()); + } + + #[test] + fn needs_resolve_dir_without_children_returns_true() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "dir".into(), + attr: Some(dummy_dir_attr(3)), + children: None, + }; + assert!(icb.needs_resolve()); + } + + #[test] + fn needs_resolve_dir_with_children_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "dir".into(), + attr: Some(dummy_dir_attr(3)), + children: Some(vec![("README.md".to_owned(), DirEntryType::RegularFile)]), + }; + assert!(!icb.needs_resolve()); + } + + #[test] + fn needs_resolve_dir_with_empty_children_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "empty-dir".into(), + attr: Some(dummy_dir_attr(4)), + children: 
Some(vec![]), + }; + assert!(!icb.needs_resolve()); + } +} diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index e98872c..1b7b835 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -64,6 +64,7 @@ impl IcbResolver for MesaResolver { path: "/".into(), rc: 0, attr: None, + children: None, }); let now = SystemTime::now(); let attr = FileAttr::Directory { @@ -73,6 +74,7 @@ impl IcbResolver for MesaResolver { }; Ok(InodeControlBlock { attr: Some(attr), + children: Some(vec![]), ..stub }) } @@ -229,6 +231,7 @@ impl MesaFS { path: org_name.as_str().into(), parent: Some(Self::ROOT_NODE_INO), attr: None, + children: None, }, ) .await; @@ -282,6 +285,7 @@ impl MesaFS { path: name.into(), parent: Some(parent_mesa_ino), attr: None, + children: None, }, |_| {}, ) diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 198f551..0771c3c 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -53,6 +53,7 @@ impl IcbResolver for OrgResolver { path: "/".into(), rc: 0, attr: None, + children: None, }); let now = SystemTime::now(); let attr = FileAttr::Directory { @@ -62,6 +63,7 @@ impl IcbResolver for OrgResolver { }; Ok(InodeControlBlock { attr: Some(attr), + children: Some(vec![]), ..stub }) } @@ -184,6 +186,7 @@ impl OrgFs { path: owner.into(), parent: Some(Self::ROOT_INO), attr: None, + children: None, }, ) .await; @@ -329,6 +332,7 @@ impl OrgFs { path: display_name.into(), parent: Some(parent_ino), attr: None, + children: None, }, ) .await; @@ -413,6 +417,7 @@ impl OrgFs { path: name.into(), parent: Some(parent_org_ino), attr: None, + children: None, } }, |icb| { diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 3382e13..c02cdfe 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -101,6 +101,7 @@ impl IcbResolver for RepoResolver { path: stub.path, rc: stub.rc, attr: Some(attr), + children: None, }) } } @@ -228,7 +229,7 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = 
%self.repo_name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { debug_assert!( - self.icache.contains(parent).await, + self.icache.contains(parent), "lookup: parent inode {parent} not in inode table" ); @@ -260,7 +261,7 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { debug_assert!( - self.icache.contains(ino).await, + self.icache.contains(ino), "readdir: inode {ino} not in inode table" ); debug_assert!( @@ -351,7 +352,7 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn open(&mut self, ino: Inode, _flags: OpenFlags) -> Result { - if !self.icache.contains(ino).await { + if !self.icache.contains(ino) { warn!(ino, "open on unknown inode"); return Err(OpenError::InodeNotFound); } @@ -450,7 +451,7 @@ impl Fs for RepoFs { #[instrument(skip(self), fields(repo = %self.repo_name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { debug_assert!( - self.icache.contains(ino).await, + self.icache.contains(ino), "forget: inode {ino} not in inode table" ); From d649cbdfaed2225a1dbc48bb982d29fbb2e5c82e Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:16:47 -0800 Subject: [PATCH 19/57] feat: add From for ReadDirError conversion --- src/fs/mescloud/common.rs | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/src/fs/mescloud/common.rs b/src/fs/mescloud/common.rs index c0f6f6e..537980d 100644 --- a/src/fs/mescloud/common.rs +++ b/src/fs/mescloud/common.rs @@ -137,6 +137,15 @@ pub enum ReadDirError { NotPermitted, } +impl From for ReadDirError { + fn from(e: LookupError) -> Self { + match e { + LookupError::RemoteMesaError(api) => Self::RemoteMesaError(api), + LookupError::InodeNotFound | LookupError::FileDoesNotExist => Self::InodeNotFound, + } + } +} + impl From for i32 { fn from(e: ReadDirError) -> Self { match e { @@ -161,3 +170,30 @@ impl 
From for i32 { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn lookup_inode_not_found_converts_to_readdir_inode_not_found() { + let err: ReadDirError = LookupError::InodeNotFound.into(); + assert!(matches!(err, ReadDirError::InodeNotFound)); + } + + #[test] + fn lookup_file_does_not_exist_converts_to_readdir_inode_not_found() { + let err: ReadDirError = LookupError::FileDoesNotExist.into(); + assert!(matches!(err, ReadDirError::InodeNotFound)); + } + + #[test] + fn lookup_remote_error_converts_to_readdir_remote_error() { + let api_err = MesaApiError::Response { + status: 500, + body: "test".to_owned(), + }; + let err: ReadDirError = LookupError::RemoteMesaError(api_err).into(); + assert!(matches!(err, ReadDirError::RemoteMesaError(_))); + } +} From 3454ec34a5adeb3b79d5f3ef59a049060ad828df Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:16:48 -0800 Subject: [PATCH 20/57] feat: populate children in RepoResolver for directory inodes --- src/fs/mescloud/repo.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index c02cdfe..d4a1cf1 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -96,12 +96,30 @@ impl IcbResolver for RepoResolver { }, }; + let children = match content { + Content::Dir(d) => Some( + d.entries + .into_iter() + .filter_map(|e| { + let (name, kind) = match e { + MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), + // TODO(MES-712): return DirEntryType::Symlink once readlink is wired up. 
+ MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), + MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), + }; + Some((name, kind)) + }) + .collect(), + ), + Content::File(_) | Content::Symlink(_) => None, + }; + Ok(InodeControlBlock { parent: stub.parent, path: stub.path, rc: stub.rc, attr: Some(attr), - children: None, + children, }) } } From 32e95178ec16f16ce79e757ef7cdc2e6c3ea261e Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:19:08 -0800 Subject: [PATCH 21/57] feat: readdir reads from icache instead of querying API directly --- src/fs/mescloud/repo.rs | 45 +++++++++++------------------------------ 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index d4a1cf1..1b7e89f 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -290,42 +290,21 @@ impl Fs for RepoFs { "readdir: inode {ino} has non-directory cached attr" ); - let file_path = self.path_of_inode(ino).await; - - let content = self - .client - .org(&self.org_name) - .repos() - .at(&self.repo_name) - .content() - .get(Some(self.ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(MesaApiError::from)?; - - let mesa_entries = match content { - Content::Dir(d) => d.entries, - Content::File(_) | Content::Symlink(_) => return Err(ReadDirError::NotADirectory), - }; - - let collected: Vec<(String, DirEntryType)> = mesa_entries - .into_iter() - .filter_map(|e| { - let (name, kind) = match e { - MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), - // TODO(MES-712): return DirEntryType::Symlink once readlink is wired up. - MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), - MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), - }; - Some((name, kind)) - }) - .collect(); + let children = self + .icache + .get_or_resolve(ino, |icb| icb.children.clone()) + .await? 
+ .ok_or(ReadDirError::NotADirectory)?; - trace!(ino, path = ?file_path, count = collected.len(), "fetched directory listing"); + trace!( + ino, + count = children.len(), + "readdir: resolved directory listing from icache" + ); - let mut entries = Vec::with_capacity(collected.len()); - for (name, kind) in &collected { + let mut entries = Vec::with_capacity(children.len()); + for (name, kind) in &children { let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; - // Insert fully-populated attr (bypassing resolver) since we know the kind from the listing let now = SystemTime::now(); let attr = match kind { DirEntryType::Directory => FileAttr::Directory { From 7f0e387cd524ba2476add07bdc372d55422a77fd Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:38:44 -0800 Subject: [PATCH 22/57] refactor: add CompositeFs and ChildSlot structs for shared delegation --- src/fs/mescloud/composite.rs | 38 ++++++++++++++++++++++++++++++++++++ src/fs/mescloud/mod.rs | 1 + 2 files changed, 39 insertions(+) create mode 100644 src/fs/mescloud/composite.rs diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs new file mode 100644 index 0000000..3fc1542 --- /dev/null +++ b/src/fs/mescloud/composite.rs @@ -0,0 +1,38 @@ +use std::collections::HashMap; + +use crate::fs::icache::bridge::HashMapBridge; +use crate::fs::icache::{FileTable, IcbResolver}; +use crate::fs::r#trait::{DirEntry, Inode}; + +use super::icache::{InodeControlBlock, MescloudICache}; + +/// A child filesystem slot: inner filesystem + bidirectional inode/fh bridge. +#[expect( + dead_code, + reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" +)] +pub(super) struct ChildSlot { + pub inner: Inner, + pub bridge: HashMapBridge, +} + +/// Generic compositing filesystem that delegates to child `Inner` filesystems. 
+/// +/// Holds the shared infrastructure (icache, file table, readdir buffer, child +/// slots) and implements all the delegation methods that `MesaFS` and `OrgFs` +/// previously duplicated. +#[expect( + dead_code, + reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" +)] +pub(super) struct CompositeFs +where + R: IcbResolver, +{ + pub icache: MescloudICache, + pub file_table: FileTable, + pub readdir_buf: Vec, + /// Maps outer inode to index into `slots` for child-root inodes. + pub child_inodes: HashMap, + pub slots: Vec>, +} diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 1b7b835..6e46964 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -21,6 +21,7 @@ const MESA_API_BASE_URL: &str = "https://staging.depot.mesa.dev/api/v1"; const MESA_API_BASE_URL: &str = "https://depot.mesa.dev/api/v1"; mod common; +mod composite; use common::InodeControlBlock; pub use common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError}; From ca69df2a9bed7613e07b17883ed56e524d16c8c6 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:42:10 -0800 Subject: [PATCH 23/57] refactor: extract InodeCachePeek trait from inode_table_get_attr --- src/fs/mescloud/common.rs | 8 ++++++++ src/fs/mescloud/mod.rs | 7 ++----- src/fs/mescloud/org.rs | 17 +++++++++-------- src/fs/mescloud/repo.rs | 12 +++++++----- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/fs/mescloud/common.rs b/src/fs/mescloud/common.rs index 537980d..340b588 100644 --- a/src/fs/mescloud/common.rs +++ b/src/fs/mescloud/common.rs @@ -3,6 +3,8 @@ use mesa_dev::low_level::apis; use thiserror::Error; +use crate::fs::r#trait::{FileAttr, Inode}; + pub(super) use super::icache::InodeControlBlock; /// A concrete error type that preserves the structure of `mesa_dev::low_level::apis::Error` @@ -171,6 +173,12 @@ impl From for i32 { } } +/// Allows a parent compositor to peek at cached attrs from a child filesystem. 
+#[async_trait::async_trait] +pub(super) trait InodeCachePeek { + async fn peek_attr(&self, ino: Inode) -> Option; +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 6e46964..cd024e6 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -22,6 +22,7 @@ const MESA_API_BASE_URL: &str = "https://depot.mesa.dev/api/v1"; mod common; mod composite; +use common::InodeCachePeek as _; use common::InodeControlBlock; pub use common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError}; @@ -398,11 +399,7 @@ impl Fs for MesaFS { .await; // Cache attr from org if available. - if let Some(org_icb_attr) = self.org_slots[idx] - .org - .inode_table_get_attr(entry.ino) - .await - { + if let Some(org_icb_attr) = self.org_slots[idx].org.peek_attr(entry.ino).await { let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_icb_attr); self.icache.cache_attr(mesa_child_ino, mesa_attr).await; } diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 0771c3c..6c23553 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -9,6 +9,7 @@ use mesa_dev::MesaClient; use secrecy::SecretString; use tracing::{instrument, trace, warn}; +use super::common::InodeCachePeek as _; pub use super::common::{ GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, }; @@ -205,11 +206,6 @@ impl OrgFs { (ino, attr) } - /// Get the cached attr for an inode, if present. 
- pub(crate) async fn inode_table_get_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino).await - } - pub fn new(name: String, client: MesaClient, fs_owner: (u32, u32)) -> Self { let resolver = OrgResolver { fs_owner, @@ -432,6 +428,13 @@ impl OrgFs { } } +#[async_trait::async_trait] +impl super::common::InodeCachePeek for OrgFs { + async fn peek_attr(&self, ino: Inode) -> Option { + self.icache.get_attr(ino).await + } +} + #[async_trait::async_trait] impl Fs for OrgFs { type LookupError = LookupError; @@ -602,9 +605,7 @@ impl Fs for OrgFs { .await; // Cache attr from repo if available. - if let Some(repo_icb_attr) = - self.repos[idx].repo.inode_table_get_attr(entry.ino).await - { + if let Some(repo_icb_attr) = self.repos[idx].repo.peek_attr(entry.ino).await { let org_attr = self.repos[idx].bridge.attr_backward(repo_icb_attr); self.icache.cache_attr(org_child_ino, org_attr).await; } else { diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 1b7e89f..23737ab 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -205,11 +205,6 @@ impl RepoFs { &self.repo_name } - /// Get the cached attr for an inode, if present. - pub(crate) async fn inode_table_get_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino).await - } - /// Build the repo-relative path for an inode by walking up the parent chain. 
/// /// Returns `None` for the root inode (the repo top-level maps to `path=None` in the @@ -235,6 +230,13 @@ impl RepoFs { } } +#[async_trait::async_trait] +impl super::common::InodeCachePeek for RepoFs { + async fn peek_attr(&self, ino: Inode) -> Option { + self.icache.get_attr(ino).await + } +} + #[async_trait::async_trait] impl Fs for RepoFs { type LookupError = LookupError; From fb930e3c366516cd385f1d5f3ee2d53cc3c75fbc Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 13:46:26 -0800 Subject: [PATCH 24/57] refactor: implement delegation methods on CompositeFs --- src/fs/mescloud/composite.rs | 264 +++++++++++++++++++++++++++++++++-- 1 file changed, 255 insertions(+), 9 deletions(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 3fc1542..23550dc 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -1,16 +1,21 @@ use std::collections::HashMap; +use std::ffi::OsStr; + +use bytes::Bytes; +use tracing::{trace, warn}; use crate::fs::icache::bridge::HashMapBridge; use crate::fs::icache::{FileTable, IcbResolver}; -use crate::fs::r#trait::{DirEntry, Inode}; +use crate::fs::r#trait::{ + DirEntry, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, OpenFlags, +}; +use super::common::{ + GetAttrError, InodeCachePeek, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, +}; use super::icache::{InodeControlBlock, MescloudICache}; /// A child filesystem slot: inner filesystem + bidirectional inode/fh bridge. -#[expect( - dead_code, - reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" -)] pub(super) struct ChildSlot { pub inner: Inner, pub bridge: HashMapBridge, @@ -21,10 +26,6 @@ pub(super) struct ChildSlot { /// Holds the shared infrastructure (icache, file table, readdir buffer, child /// slots) and implements all the delegation methods that `MesaFS` and `OrgFs` /// previously duplicated. 
-#[expect( - dead_code, - reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" -)] pub(super) struct CompositeFs where R: IcbResolver, @@ -36,3 +37,248 @@ where pub child_inodes: HashMap, pub slots: Vec>, } + +#[expect( + dead_code, + reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" +)] +impl CompositeFs +where + R: IcbResolver, + Inner: Fs< + LookupError = LookupError, + GetAttrError = GetAttrError, + OpenError = OpenError, + ReadError = ReadError, + ReaddirError = ReadDirError, + ReleaseError = ReleaseError, + > + InodeCachePeek + + Send + + Sync, +{ + /// Walk the parent chain to find which child slot owns an inode. + pub async fn slot_for_inode(&self, ino: Inode) -> Option { + if let Some(&idx) = self.child_inodes.get(&ino) { + return Some(idx); + } + let mut current = ino; + loop { + let parent = self + .icache + .get_icb(current, |icb| icb.parent) + .await + .flatten()?; + if let Some(&idx) = self.child_inodes.get(&parent) { + return Some(idx); + } + current = parent; + } + } + + /// Allocate an outer file handle and map it through the bridge. + #[must_use] + pub fn alloc_fh(&mut self, slot_idx: usize, inner_fh: FileHandle) -> FileHandle { + let fh = self.file_table.allocate(); + self.slots[slot_idx].bridge.insert_fh(fh, inner_fh); + fh + } + + /// Translate an inner inode to an outer inode, allocating if needed. + /// Also inserts a stub ICB into the outer icache when the inode is new. + pub async fn translate_inner_ino( + &mut self, + slot_idx: usize, + inner_ino: Inode, + parent_outer_ino: Inode, + name: &OsStr, + ) -> Inode { + let outer_ino = self.slots[slot_idx] + .bridge + .backward_or_insert_inode(inner_ino, || self.icache.allocate_inode()); + self.icache + .entry_or_insert_icb( + outer_ino, + || InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent_outer_ino), + attr: None, + children: None, + }, + |_| {}, + ) + .await; + outer_ino + } + + /// Get cached file attributes for an inode. 
+ pub async fn delegated_getattr(&self, ino: Inode) -> Result { + self.icache.get_attr(ino).await.ok_or_else(|| { + warn!(ino, "getattr on unknown inode"); + GetAttrError::InodeNotFound + }) + } + + /// Find slot, forward inode, delegate to inner, allocate outer file handle. + pub async fn delegated_open( + &mut self, + ino: Inode, + flags: OpenFlags, + ) -> Result { + let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + warn!(ino, "open on inode not belonging to any child"); + OpenError::InodeNotFound + })?; + let inner_ino = self.slots[idx] + .bridge + .forward_or_insert_inode(ino, || unreachable!("open: ino should be mapped")); + let inner_open = self.slots[idx].inner.open(inner_ino, flags).await?; + let outer_fh = self.alloc_fh(idx, inner_open.handle); + trace!( + ino, + outer_fh, + inner_fh = inner_open.handle, + "open: assigned file handle" + ); + Ok(OpenFile { + handle: outer_fh, + options: inner_open.options, + }) + } + + /// Find slot, forward inode and file handle, delegate read to inner. + #[expect(clippy::too_many_arguments, reason = "mirrors fuser read API")] + pub async fn delegated_read( + &mut self, + ino: Inode, + fh: FileHandle, + offset: u64, + size: u32, + flags: OpenFlags, + lock_owner: Option, + ) -> Result { + let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + warn!(ino, "read on inode not belonging to any child"); + ReadError::InodeNotFound + })?; + let inner_ino = self.slots[idx] + .bridge + .forward_or_insert_inode(ino, || unreachable!("read: ino should be mapped")); + let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| { + warn!(fh, "read: no fh mapping found"); + ReadError::FileNotOpen + })?; + self.slots[idx] + .inner + .read(inner_ino, inner_fh, offset, size, flags, lock_owner) + .await + } + + /// Find slot, forward inode and file handle, delegate release to inner, + /// then clean up the file handle mapping. 
+ pub async fn delegated_release( + &mut self, + ino: Inode, + fh: FileHandle, + flags: OpenFlags, + flush: bool, + ) -> Result<(), ReleaseError> { + let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + warn!(ino, "release on inode not belonging to any child"); + ReleaseError::FileNotOpen + })?; + let inner_ino = self.slots[idx] + .bridge + .forward_or_insert_inode(ino, || unreachable!("release: ino should be mapped")); + let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| { + warn!(fh, "release: no fh mapping found"); + ReleaseError::FileNotOpen + })?; + let result = self.slots[idx] + .inner + .release(inner_ino, inner_fh, flags, flush) + .await; + self.slots[idx].bridge.remove_fh_by_left(fh); + trace!(ino, fh, "release: cleaned up fh mapping"); + result + } + + /// Propagate forget to the inner filesystem, evict from icache, and clean + /// up bridge mappings. Returns `true` if the inode was evicted. + #[must_use] + pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { + if let Some(idx) = self.slot_for_inode(ino).await + && let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) + { + self.slots[idx].inner.forget(inner_ino, nlookups).await; + } + if self.icache.forget(ino, nlookups).await.is_some() { + self.child_inodes.remove(&ino); + for slot in &mut self.slots { + slot.bridge.remove_inode_by_left(ino); + } + true + } else { + false + } + } + + /// Return filesystem statistics from the icache. + #[must_use] + pub fn delegated_statfs(&self) -> FilesystemStats { + self.icache.statfs() + } + + /// Delegation branch for lookup when the parent is owned by a child slot. 
+ pub async fn delegated_lookup( + &mut self, + parent: Inode, + name: &OsStr, + ) -> Result { + let idx = self + .slot_for_inode(parent) + .await + .ok_or(LookupError::InodeNotFound)?; + let inner_parent = self.slots[idx] + .bridge + .forward_or_insert_inode(parent, || unreachable!("lookup: parent should be mapped")); + let inner_attr = self.slots[idx].inner.lookup(inner_parent, name).await?; + let inner_ino = inner_attr.common().ino; + let outer_ino = self.translate_inner_ino(idx, inner_ino, parent, name).await; + let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); + self.icache.cache_attr(outer_ino, outer_attr).await; + let rc = self.icache.inc_rc(outer_ino).await; + trace!(outer_ino, inner_ino, rc, "lookup: resolved via delegation"); + Ok(outer_attr) + } + + /// Delegation branch for readdir when the inode is owned by a child slot. + pub async fn delegated_readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { + let idx = self + .slot_for_inode(ino) + .await + .ok_or(ReadDirError::InodeNotFound)?; + let inner_ino = self.slots[idx] + .bridge + .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); + let inner_entries = self.slots[idx].inner.readdir(inner_ino).await?; + let inner_entries: Vec = inner_entries.to_vec(); + let mut outer_entries = Vec::with_capacity(inner_entries.len()); + for entry in &inner_entries { + let outer_child_ino = self + .translate_inner_ino(idx, entry.ino, ino, &entry.name) + .await; + if let Some(inner_attr) = self.slots[idx].inner.peek_attr(entry.ino).await { + let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); + self.icache.cache_attr(outer_child_ino, outer_attr).await; + } + outer_entries.push(DirEntry { + ino: outer_child_ino, + name: entry.name.clone(), + kind: entry.kind, + }); + } + self.readdir_buf = outer_entries; + Ok(&self.readdir_buf) + } +} From 2d72138f5a9e7b1f5a5502eea61265e3eeb4f63b Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 
13:55:21 -0800 Subject: [PATCH 25/57] refactor: MesaFS now delegates to CompositeFs --- src/fs/mescloud/composite.rs | 4 - src/fs/mescloud/mod.rs | 327 ++++++++--------------------------- 2 files changed, 75 insertions(+), 256 deletions(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 23550dc..bae5b46 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -38,10 +38,6 @@ where pub slots: Vec>, } -#[expect( - dead_code, - reason = "will be used when MesaFS/OrgFs are refactored to use CompositeFs" -)] impl CompositeFs where R: IcbResolver, diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index cd024e6..150b831 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -15,6 +15,8 @@ use crate::fs::r#trait::{ OpenFlags, }; +use composite::{ChildSlot, CompositeFs}; + #[cfg(feature = "staging")] const MESA_API_BASE_URL: &str = "https://staging.depot.mesa.dev/api/v1"; #[cfg(not(feature = "staging"))] @@ -22,7 +24,6 @@ const MESA_API_BASE_URL: &str = "https://depot.mesa.dev/api/v1"; mod common; mod composite; -use common::InodeCachePeek as _; use common::InodeControlBlock; pub use common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError}; @@ -36,10 +37,6 @@ use org::OrgFs; pub mod icache; pub mod repo; -// --------------------------------------------------------------------------- -// MesaResolver -// --------------------------------------------------------------------------- - struct MesaResolver { fs_owner: (u32, u32), block_size: u32, @@ -83,36 +80,20 @@ impl IcbResolver for MesaResolver { } } -// --------------------------------------------------------------------------- -// MesaFS -// --------------------------------------------------------------------------- - -/// Per-org wrapper with inode and file handle translation. 
-struct OrgSlot { - org: OrgFs, - bridge: HashMapBridge, // left = mesa, right = org -} - /// Classifies an inode by its role in the mesa hierarchy. enum InodeRole { /// The filesystem root (ino == 1). Root, /// An inode owned by some org. - OrgOwned { idx: usize }, + OrgOwned, } /// The top-level `MesaFS` filesystem. /// /// Composes multiple [`OrgFs`] instances, each with its own inode namespace, -/// using [`HashMapBridge`] for bidirectional inode/fh translation at each boundary. +/// delegating to [`CompositeFs`] for inode/fh translation at each boundary. pub struct MesaFS { - icache: MescloudICache, - file_table: FileTable, - readdir_buf: Vec, - - /// Maps mesa-level org-root inodes → index into `org_slots`. - org_inodes: HashMap, - org_slots: Vec, + composite: CompositeFs, } impl MesaFS { @@ -126,23 +107,30 @@ impl MesaFS { block_size: Self::BLOCK_SIZE, }; Self { - icache: MescloudICache::new(resolver, Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE), - file_table: FileTable::new(), - readdir_buf: Vec::new(), - org_inodes: HashMap::new(), - org_slots: orgs - .map(|org_conf| { - let client = MesaClient::builder() - .with_api_key(org_conf.api_key.expose_secret()) - .with_base_path(MESA_API_BASE_URL) - .build(); - let org = OrgFs::new(org_conf.name, client, fs_owner); - OrgSlot { - org, - bridge: HashMapBridge::new(), - } - }) - .collect(), + composite: CompositeFs { + icache: MescloudICache::new( + resolver, + Self::ROOT_NODE_INO, + fs_owner, + Self::BLOCK_SIZE, + ), + file_table: FileTable::new(), + readdir_buf: Vec::new(), + child_inodes: HashMap::new(), + slots: orgs + .map(|org_conf| { + let client = MesaClient::builder() + .with_api_key(org_conf.api_key.expose_secret()) + .with_base_path(MESA_API_BASE_URL) + .build(); + let org = OrgFs::new(org_conf.name, client, fs_owner); + ChildSlot { + inner: org, + bridge: HashMapBridge::new(), + } + }) + .collect(), + }, } } @@ -151,44 +139,30 @@ impl MesaFS { if ino == Self::ROOT_NODE_INO { return InodeRole::Root; } 
- if let Some(&idx) = self.org_inodes.get(&ino) { - return InodeRole::OrgOwned { idx }; + if self.composite.child_inodes.contains_key(&ino) { + return InodeRole::OrgOwned; } - // Walk parent chain. - if let Some(idx) = self.org_slot_for_inode(ino).await { - return InodeRole::OrgOwned { idx }; + if self.composite.slot_for_inode(ino).await.is_some() { + return InodeRole::OrgOwned; } debug_assert!(false, "inode {ino} not found in any org slot"); InodeRole::Root } - /// Find the org slot index that owns `ino` by walking the parent chain. - async fn org_slot_for_inode(&self, ino: Inode) -> Option { - if let Some(&idx) = self.org_inodes.get(&ino) { - return Some(idx); - } - let mut current = ino; - loop { - let parent = self - .icache - .get_icb(current, |icb| icb.parent) - .await - .flatten()?; - if let Some(&idx) = self.org_inodes.get(&parent) { - return Some(idx); - } - current = parent; - } - } - /// Ensure a mesa-level inode exists for the org at `org_idx`. /// Seeds the bridge with (`mesa_org_ino`, `OrgFs::ROOT_INO`). /// Does NOT bump rc. async fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { // Check if an inode already exists. - if let Some((&existing_ino, _)) = self.org_inodes.iter().find(|&(_, &idx)| idx == org_idx) { - if let Some(attr) = self.icache.get_attr(existing_ino).await { + if let Some((&existing_ino, _)) = self + .composite + .child_inodes + .iter() + .find(|&(_, &idx)| idx == org_idx) + { + if let Some(attr) = self.composite.icache.get_attr(existing_ino).await { let rc = self + .composite .icache .get_icb(existing_ino, |icb| icb.rc) .await @@ -211,21 +185,22 @@ impl MesaFS { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - self.icache.cache_attr(existing_ino, attr).await; + self.composite.icache.cache_attr(existing_ino, attr).await; return (existing_ino, attr); } // Allocate new. 
- let org_name = self.org_slots[org_idx].org.name().to_owned(); - let ino = self.icache.allocate_inode(); + let org_name = self.composite.slots[org_idx].inner.name().to_owned(); + let ino = self.composite.icache.allocate_inode(); trace!(ino, org_idx, org = %org_name, "ensure_org_inode: allocated new inode"); let now = SystemTime::now(); - self.icache + self.composite + .icache .insert_icb( ino, InodeControlBlock { @@ -238,10 +213,10 @@ impl MesaFS { ) .await; - self.org_inodes.insert(ino, org_idx); + self.composite.child_inodes.insert(ino, org_idx); - // Seed bridge: mesa org-root ↔ OrgFs::ROOT_INO. - self.org_slots[org_idx] + // Seed bridge: mesa org-root <-> OrgFs::ROOT_INO. + self.composite.slots[org_idx] .bridge .insert_inode(ino, OrgFs::ROOT_INO); @@ -251,50 +226,13 @@ impl MesaFS { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - self.icache.cache_attr(ino, attr).await; + self.composite.icache.cache_attr(ino, attr).await; (ino, attr) } - - /// Allocate a mesa-level file handle and map it through the bridge. - fn alloc_fh(&mut self, slot_idx: usize, org_fh: FileHandle) -> FileHandle { - let fh = self.file_table.allocate(); - self.org_slots[slot_idx].bridge.insert_fh(fh, org_fh); - fh - } - - /// Translate an org inode to a mesa inode, allocating if needed. - /// Also mirrors the ICB into the mesa `inode_table`. 
- async fn translate_org_ino_to_mesa( - &mut self, - slot_idx: usize, - org_ino: Inode, - parent_mesa_ino: Inode, - name: &OsStr, - ) -> Inode { - let mesa_ino = self.org_slots[slot_idx] - .bridge - .backward_or_insert_inode(org_ino, || self.icache.allocate_inode()); - - self.icache - .entry_or_insert_icb( - mesa_ino, - || InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_mesa_ino), - attr: None, - children: None, - }, - |_| {}, - ) - .await; - - mesa_ino - } } #[async_trait::async_trait] @@ -310,39 +248,21 @@ impl Fs for MesaFS { async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { match self.inode_role(parent).await { InodeRole::Root => { - // Children of root are orgs. let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; let org_idx = self - .org_slots + .composite + .slots .iter() - .position(|s| s.org.name() == org_name) + .position(|s| s.inner.name() == org_name) .ok_or(LookupError::InodeNotFound)?; trace!(org = org_name, "lookup: matched org"); let (ino, attr) = self.ensure_org_inode(org_idx).await; - let rc = self.icache.inc_rc(ino).await; + let rc = self.composite.icache.inc_rc(ino).await; trace!(ino, org = org_name, rc, "lookup: resolved org inode"); Ok(attr) } - InodeRole::OrgOwned { idx } => { - // Delegate to org. 
- let org_parent = self.org_slots[idx] - .bridge - .forward_or_insert_inode(parent, || unreachable!("forward should find parent")); - - let org_attr = self.org_slots[idx].org.lookup(org_parent, name).await?; - let org_ino = org_attr.common().ino; - - let mesa_ino = self - .translate_org_ino_to_mesa(idx, org_ino, parent, name) - .await; - - let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_attr); - self.icache.cache_attr(mesa_ino, mesa_attr).await; - let rc = self.icache.inc_rc(mesa_ino).await; - trace!(mesa_ino, org_ino, rc, "lookup: resolved via org delegation"); - Ok(mesa_attr) - } + InodeRole::OrgOwned => self.composite.delegated_lookup(parent, name).await, } } @@ -352,10 +272,7 @@ impl Fs for MesaFS { ino: Inode, _fh: Option, ) -> Result { - self.icache.get_attr(ino).await.ok_or_else(|| { - warn!(ino, "getattr on unknown inode"); - GetAttrError::InodeNotFound - }) + self.composite.delegated_getattr(ino).await } #[instrument(skip(self))] @@ -363,10 +280,11 @@ impl Fs for MesaFS { match self.inode_role(ino).await { InodeRole::Root => { let org_info: Vec<(usize, String)> = self - .org_slots + .composite + .slots .iter() .enumerate() - .map(|(idx, s)| (idx, s.org.name().to_owned())) + .map(|(idx, s)| (idx, s.inner.name().to_owned())) .collect(); let mut entries = Vec::with_capacity(org_info.len()); @@ -380,67 +298,16 @@ impl Fs for MesaFS { } trace!(entry_count = entries.len(), "readdir: listing orgs"); - - self.readdir_buf = entries; - Ok(&self.readdir_buf) - } - InodeRole::OrgOwned { idx } => { - let org_ino = self.org_slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); - - let org_entries = self.org_slots[idx].org.readdir(org_ino).await?; - let org_entries: Vec = org_entries.to_vec(); - - let mut mesa_entries = Vec::with_capacity(org_entries.len()); - for entry in &org_entries { - let mesa_child_ino = self - .translate_org_ino_to_mesa(idx, entry.ino, ino, &entry.name) - .await; - - // Cache attr from 
org if available. - if let Some(org_icb_attr) = self.org_slots[idx].org.peek_attr(entry.ino).await { - let mesa_attr = self.org_slots[idx].bridge.attr_backward(org_icb_attr); - self.icache.cache_attr(mesa_child_ino, mesa_attr).await; - } - - mesa_entries.push(DirEntry { - ino: mesa_child_ino, - name: entry.name.clone(), - kind: entry.kind, - }); - } - - self.readdir_buf = mesa_entries; - Ok(&self.readdir_buf) + self.composite.readdir_buf = entries; + Ok(&self.composite.readdir_buf) } + InodeRole::OrgOwned => self.composite.delegated_readdir(ino).await, } } #[instrument(skip(self))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "open on inode not belonging to any org"); - OpenError::InodeNotFound - })?; - - let org_ino = self.org_slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("open: ino should be mapped")); - - let org_open = self.org_slots[idx].org.open(org_ino, flags).await?; - let mesa_fh = self.alloc_fh(idx, org_open.handle); - - trace!( - ino, - mesa_fh, - org_fh = org_open.handle, - "open: assigned file handle" - ); - Ok(OpenFile { - handle: mesa_fh, - options: org_open.options, - }) + self.composite.delegated_open(ino, flags).await } #[instrument(skip(self))] @@ -453,22 +320,8 @@ impl Fs for MesaFS { flags: OpenFlags, lock_owner: Option, ) -> Result { - let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "read on inode not belonging to any org"); - ReadError::InodeNotFound - })?; - - let org_ino = self.org_slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("read: ino should be mapped")); - let org_fh = self.org_slots[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "read: no fh mapping found"); - ReadError::FileNotOpen - })?; - - self.org_slots[idx] - .org - .read(org_ino, org_fh, offset, size, flags, lock_owner) + self.composite + .delegated_read(ino, fh, offset, size, flags, lock_owner) .await } 
@@ -480,48 +333,18 @@ impl Fs for MesaFS { flags: OpenFlags, flush: bool, ) -> Result<(), ReleaseError> { - let idx = self.org_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "release on inode not belonging to any org"); - ReleaseError::FileNotOpen - })?; - - let org_ino = self.org_slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("release: ino should be mapped")); - let org_fh = self.org_slots[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "release: no fh mapping found"); - ReleaseError::FileNotOpen - })?; - - let result = self.org_slots[idx] - .org - .release(org_ino, org_fh, flags, flush) - .await; - - self.org_slots[idx].bridge.remove_fh_by_left(fh); - trace!(ino, fh, "release: cleaned up fh mapping"); - - result + self.composite + .delegated_release(ino, fh, flags, flush) + .await } #[instrument(skip(self))] async fn forget(&mut self, ino: Inode, nlookups: u64) { - // Propagate forget to inner org if applicable. - if let Some(idx) = self.org_slot_for_inode(ino).await - && let Some(&org_ino) = self.org_slots[idx].bridge.inode_map_get_by_left(ino) - { - self.org_slots[idx].org.forget(org_ino, nlookups).await; - } - - if self.icache.forget(ino, nlookups).await.is_some() { - self.org_inodes.remove(&ino); - for slot in &mut self.org_slots { - slot.bridge.remove_inode_by_left(ino); - } - } + // MesaFS has no extra state to clean up on eviction (unlike OrgFs::owner_inodes). 
+ let _ = self.composite.delegated_forget(ino, nlookups).await; } async fn statfs(&mut self) -> Result { - Ok(self.icache.statfs()) + Ok(self.composite.delegated_statfs()) } } From 16865d5bdc679b0909734ab23788f6613fe7e369 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 14:10:25 -0800 Subject: [PATCH 26/57] refactor: OrgFs now delegates to CompositeFs --- src/fs/mescloud/org.rs | 368 ++++++++--------------------------------- 1 file changed, 70 insertions(+), 298 deletions(-) diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 6c23553..5e5ef02 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -9,11 +9,11 @@ use mesa_dev::MesaClient; use secrecy::SecretString; use tracing::{instrument, trace, warn}; -use super::common::InodeCachePeek as _; pub use super::common::{ GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, }; use super::common::{InodeControlBlock, MesaApiError}; +use super::composite::{ChildSlot, CompositeFs}; use super::icache as mescloud_icache; use super::icache::MescloudICache; use super::repo::RepoFs; @@ -24,10 +24,6 @@ use crate::fs::r#trait::{ OpenFlags, }; -// --------------------------------------------------------------------------- -// OrgResolver -// --------------------------------------------------------------------------- - pub(super) struct OrgResolver { fs_owner: (u32, u32), block_size: u32, @@ -71,22 +67,12 @@ impl IcbResolver for OrgResolver { } } -// --------------------------------------------------------------------------- -// OrgFs -// --------------------------------------------------------------------------- - #[derive(Debug, Clone)] pub struct OrgConfig { pub name: String, pub api_key: SecretString, } -/// Per-repo wrapper with inode and file handle translation. -struct RepoSlot { - repo: RepoFs, - bridge: HashMapBridge, // left = org, right = repo -} - /// Classifies an inode by its role in the org hierarchy. enum InodeRole { /// The org root directory. 
@@ -94,27 +80,19 @@ enum InodeRole { /// A virtual owner directory (github only). OwnerDir, /// An inode owned by some repo. - RepoOwned { idx: usize }, + RepoOwned, } /// A filesystem rooted at a single organization. /// -/// Owns multiple [`RepoFs`] instances and translates inodes between its namespace -/// and each repo's namespace using [`HashMapBridge`]. +/// Composes multiple [`RepoFs`] instances, each with its own inode namespace, +/// delegating to [`CompositeFs`] for inode/fh translation at each boundary. pub struct OrgFs { name: String, client: MesaClient, - - icache: MescloudICache, - file_table: FileTable, - readdir_buf: Vec, - - /// Maps org-level repo-root inodes → index into `repos`. - repo_inodes: HashMap, - /// Maps org-level owner-dir inodes → owner name. - /// Only populated when org name is "github". + composite: CompositeFs, + /// Maps org-level owner-dir inodes to owner name (github only). owner_inodes: HashMap, - repos: Vec, } impl OrgFs { @@ -157,7 +135,7 @@ impl OrgFs { // Check existing for (&ino, existing_owner) in &self.owner_inodes { if existing_owner == owner { - if let Some(attr) = self.icache.get_attr(ino).await { + if let Some(attr) = self.composite.icache.get_attr(ino).await { return (ino, attr); } let now = SystemTime::now(); @@ -167,19 +145,20 @@ impl OrgFs { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - self.icache.cache_attr(ino, attr).await; + self.composite.icache.cache_attr(ino, attr).await; return (ino, attr); } } // Allocate new - let ino = self.icache.allocate_inode(); + let ino = self.composite.icache.allocate_inode(); let now = SystemTime::now(); - self.icache + self.composite + .icache .insert_icb( ino, InodeControlBlock { @@ -198,11 +177,11 @@ impl OrgFs { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - 
self.icache.cache_attr(ino, attr).await; + self.composite.icache.cache_attr(ino, attr).await; (ino, attr) } @@ -214,12 +193,14 @@ impl OrgFs { Self { name, client, - icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), - file_table: FileTable::new(), - readdir_buf: Vec::new(), - repo_inodes: HashMap::new(), + composite: CompositeFs { + icache: MescloudICache::new(resolver, Self::ROOT_INO, fs_owner, Self::BLOCK_SIZE), + file_table: FileTable::new(), + readdir_buf: Vec::new(), + child_inodes: HashMap::new(), + slots: Vec::new(), + }, owner_inodes: HashMap::new(), - repos: Vec::new(), } } @@ -231,43 +212,16 @@ impl OrgFs { if self.owner_inodes.contains_key(&ino) { return InodeRole::OwnerDir; } - if let Some(&idx) = self.repo_inodes.get(&ino) { - return InodeRole::RepoOwned { idx }; + if self.composite.child_inodes.contains_key(&ino) { + return InodeRole::RepoOwned; } - // Walk parent chain to find owning repo. - if let Some(idx) = self.repo_slot_for_inode(ino).await { - return InodeRole::RepoOwned { idx }; + if self.composite.slot_for_inode(ino).await.is_some() { + return InodeRole::RepoOwned; } - // Shouldn't happen — all non-root inodes should be repo-owned. - trace!( - ino, - "inode_role: inode not found in any repo slot, falling back to OrgRoot" - ); debug_assert!(false, "inode {ino} not found in any repo slot"); InodeRole::OrgRoot } - /// Find the repo slot index that owns `ino` by walking the parent chain. - async fn repo_slot_for_inode(&self, ino: Inode) -> Option { - // Direct repo root? - if let Some(&idx) = self.repo_inodes.get(&ino) { - return Some(idx); - } - // Walk parents. - let mut current = ino; - loop { - let parent = self - .icache - .get_icb(current, |icb| icb.parent) - .await - .flatten()?; - if let Some(&idx) = self.repo_inodes.get(&parent) { - return Some(idx); - } - current = parent; - } - } - /// Ensure an inode + `RepoFs` exists for the given repo name. /// Does NOT bump rc. 
/// @@ -282,10 +236,15 @@ impl OrgFs { parent_ino: Inode, ) -> (Inode, FileAttr) { // Check existing repos. - for (&ino, &idx) in &self.repo_inodes { - if self.repos[idx].repo.repo_name() == repo_name { - if let Some(attr) = self.icache.get_attr(ino).await { - let rc = self.icache.get_icb(ino, |icb| icb.rc).await.unwrap_or(0); + for (&ino, &idx) in &self.composite.child_inodes { + if self.composite.slots[idx].inner.repo_name() == repo_name { + if let Some(attr) = self.composite.icache.get_attr(ino).await { + let rc = self + .composite + .icache + .get_icb(ino, |icb| icb.rc) + .await + .unwrap_or(0); trace!(ino, repo = repo_name, rc, "ensure_repo_inode: reusing"); return (ino, attr); } @@ -302,17 +261,17 @@ impl OrgFs { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - self.icache.cache_attr(ino, attr).await; + self.composite.icache.cache_attr(ino, attr).await; return (ino, attr); } } // Allocate new. 
- let ino = self.icache.allocate_inode(); + let ino = self.composite.icache.allocate_inode(); trace!( ino, repo = repo_name, @@ -320,7 +279,8 @@ impl OrgFs { ); let now = SystemTime::now(); - self.icache + self.composite + .icache .insert_icb( ino, InodeControlBlock { @@ -338,15 +298,18 @@ impl OrgFs { self.name.clone(), repo_name.to_owned(), default_branch.to_owned(), - self.icache.fs_owner(), + self.composite.icache.fs_owner(), ); let mut bridge = HashMapBridge::new(); bridge.insert_inode(ino, RepoFs::ROOT_INO); - let idx = self.repos.len(); - self.repos.push(RepoSlot { repo, bridge }); - self.repo_inodes.insert(ino, idx); + let idx = self.composite.slots.len(); + self.composite.slots.push(ChildSlot { + inner: repo, + bridge, + }); + self.composite.child_inodes.insert(ino, idx); let attr = FileAttr::Directory { common: mescloud_icache::make_common_file_attr( @@ -354,11 +317,11 @@ impl OrgFs { 0o755, now, now, - self.icache.fs_owner(), - self.icache.block_size(), + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), ), }; - self.icache.cache_attr(ino, attr).await; + self.composite.icache.cache_attr(ino, attr).await; (ino, attr) } @@ -375,63 +338,12 @@ impl OrgFs { .await .map_err(MesaApiError::from) } - - /// Allocate an org-level file handle and map it through the bridge. - fn alloc_fh(&mut self, slot_idx: usize, repo_fh: FileHandle) -> FileHandle { - let fh = self.file_table.allocate(); - self.repos[slot_idx].bridge.insert_fh(fh, repo_fh); - fh - } - - /// Translate a repo inode to an org inode, allocating if needed. - /// Also mirrors the ICB into the org's inode table. 
- async fn translate_repo_ino_to_org( - &mut self, - slot_idx: usize, - repo_ino: Inode, - parent_org_ino: Inode, - name: &OsStr, - _kind: DirEntryType, - ) -> Inode { - let org_ino = self.repos[slot_idx] - .bridge - .backward_or_insert_inode(repo_ino, || self.icache.allocate_inode()); - - self.icache - .entry_or_insert_icb( - org_ino, - || { - trace!( - org_ino, - repo_ino, - parent = parent_org_ino, - ?name, - "translate: created new org ICB" - ); - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_org_ino), - attr: None, - children: None, - } - }, - |icb| { - if icb.rc > 0 || icb.attr.is_some() { - trace!(org_ino, repo_ino, "translate: reused existing org ICB"); - } - }, - ) - .await; - - org_ino - } } #[async_trait::async_trait] impl super::common::InodeCachePeek for OrgFs { async fn peek_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino).await + self.composite.icache.get_attr(ino).await } } @@ -455,7 +367,7 @@ impl Fs for OrgFs { // name is an owner like "torvalds" — create lazily, no API validation. trace!(owner = name_str, "lookup: resolving github owner dir"); let (ino, attr) = self.ensure_owner_inode(name_str).await; - self.icache.inc_rc(ino).await; + self.composite.icache.inc_rc(ino).await; Ok(attr) } else { // Children of org root are repos. @@ -467,7 +379,7 @@ impl Fs for OrgFs { let (ino, attr) = self .ensure_repo_inode(name_str, name_str, &repo.default_branch, Self::ROOT_INO) .await; - let rc = self.icache.inc_rc(ino).await; + let rc = self.composite.icache.inc_rc(ino).await; trace!(ino, repo = name_str, rc, "lookup: resolved repo inode"); Ok(attr) } @@ -497,33 +409,10 @@ impl Fs for OrgFs { let (ino, attr) = self .ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent) .await; - self.icache.inc_rc(ino).await; + self.composite.icache.inc_rc(ino).await; Ok(attr) } - InodeRole::RepoOwned { idx } => { - // Delegate to repo. 
- let repo_parent = self.repos[idx] - .bridge - .forward_or_insert_inode(parent, || unreachable!("forward should find parent")); - // ^ forward should always find parent since it was previously mapped. - // Using forward_or_insert just for safety, but the allocate closure should never run. - - let repo_attr = self.repos[idx].repo.lookup(repo_parent, name).await?; - let repo_ino = repo_attr.common().ino; - - // Translate back to org namespace. - let kind: DirEntryType = repo_attr.into(); - let org_ino = self - .translate_repo_ino_to_org(idx, repo_ino, parent, name, kind) - .await; - - // Rebuild attr with org inode. - let org_attr = self.repos[idx].bridge.attr_backward(repo_attr); - self.icache.cache_attr(org_ino, org_attr).await; - let rc = self.icache.inc_rc(org_ino).await; - trace!(org_ino, repo_ino, rc, "lookup: resolved content inode"); - Ok(org_attr) - } + InodeRole::RepoOwned => self.composite.delegated_lookup(parent, name).await, } } @@ -533,10 +422,7 @@ impl Fs for OrgFs { ino: Inode, _fh: Option, ) -> Result { - self.icache.get_attr(ino).await.ok_or_else(|| { - warn!(ino, "getattr on unknown inode"); - GetAttrError::InodeNotFound - }) + self.composite.delegated_getattr(ino).await } #[instrument(skip(self), fields(org = %self.name))] @@ -580,79 +466,21 @@ impl Fs for OrgFs { }); } - self.readdir_buf = entries; - Ok(&self.readdir_buf) + self.composite.readdir_buf = entries; + Ok(&self.composite.readdir_buf) } InodeRole::OwnerDir if self.is_github() => { // TODO(MES-674): Cleanup "special" casing for github. Err(ReadDirError::NotPermitted) } InodeRole::OwnerDir => Err(ReadDirError::NotADirectory), - InodeRole::RepoOwned { idx } => { - // Delegate to repo. - let repo_ino = self.repos[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); - - let repo_entries = self.repos[idx].repo.readdir(repo_ino).await?; - // Clone entries to release borrow on repo before mutating self. 
- let repo_entries: Vec = repo_entries.to_vec(); - - let mut org_entries = Vec::with_capacity(repo_entries.len()); - for entry in &repo_entries { - let org_child_ino = self - .translate_repo_ino_to_org(idx, entry.ino, ino, &entry.name, entry.kind) - .await; - - // Cache attr from repo if available. - if let Some(repo_icb_attr) = self.repos[idx].repo.peek_attr(entry.ino).await { - let org_attr = self.repos[idx].bridge.attr_backward(repo_icb_attr); - self.icache.cache_attr(org_child_ino, org_attr).await; - } else { - trace!( - repo_ino = entry.ino, - org_ino = org_child_ino, - "readdir: no cached attr from repo to propagate" - ); - } - - org_entries.push(DirEntry { - ino: org_child_ino, - name: entry.name.clone(), - kind: entry.kind, - }); - } - - self.readdir_buf = org_entries; - Ok(&self.readdir_buf) - } + InodeRole::RepoOwned => self.composite.delegated_readdir(ino).await, } } #[instrument(skip(self), fields(org = %self.name))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "open on inode not belonging to any repo"); - OpenError::InodeNotFound - })?; - - let repo_ino = self.repos[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("open: ino should be mapped")); - - let repo_open = self.repos[idx].repo.open(repo_ino, flags).await?; - let org_fh = self.alloc_fh(idx, repo_open.handle); - - trace!( - ino, - org_fh, - repo_fh = repo_open.handle, - "open: assigned file handle" - ); - Ok(OpenFile { - handle: org_fh, - options: repo_open.options, - }) + self.composite.delegated_open(ino, flags).await } #[instrument(skip(self), fields(org = %self.name))] @@ -665,26 +493,8 @@ impl Fs for OrgFs { flags: OpenFlags, lock_owner: Option, ) -> Result { - let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "read on inode not belonging to any repo"); - ReadError::InodeNotFound - })?; - - let repo_ino = self.repos[idx] - .bridge - 
.forward_or_insert_inode(ino, || unreachable!("read: ino should be mapped")); - let repo_fh = self.repos[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "read: no fh mapping found"); - ReadError::FileNotOpen - })?; - - trace!( - ino, - fh, repo_ino, repo_fh, offset, size, "read: delegating to repo" - ); - self.repos[idx] - .repo - .read(repo_ino, repo_fh, offset, size, flags, lock_owner) + self.composite + .delegated_read(ino, fh, offset, size, flags, lock_owner) .await } @@ -696,58 +506,20 @@ impl Fs for OrgFs { flags: OpenFlags, flush: bool, ) -> Result<(), ReleaseError> { - let idx = self.repo_slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "release on inode not belonging to any repo"); - ReleaseError::FileNotOpen - })?; - - let repo_ino = self.repos[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("release: ino should be mapped")); - let repo_fh = self.repos[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "release: no fh mapping found"); - ReleaseError::FileNotOpen - })?; - - trace!(ino, fh, repo_ino, repo_fh, "release: delegating to repo"); - let result = self.repos[idx] - .repo - .release(repo_ino, repo_fh, flags, flush) - .await; - - // Clean up fh mapping. - self.repos[idx].bridge.remove_fh_by_left(fh); - trace!(ino, fh, "release: cleaned up fh mapping"); - - result + self.composite + .delegated_release(ino, fh, flags, flush) + .await } #[instrument(skip(self), fields(org = %self.name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { - // Propagate forget to inner repo if applicable. - if let Some(idx) = self.repo_slot_for_inode(ino).await { - if let Some(&repo_ino) = self.repos[idx].bridge.inode_map_get_by_left(ino) { - self.repos[idx].repo.forget(repo_ino, nlookups).await; - } else { - trace!( - ino, - "forget: no bridge mapping found, skipping repo propagation" - ); - } - } - - if self.icache.forget(ino, nlookups).await.is_some() { - // Clean up repo_inodes and owner_inodes mappings. 
- self.repo_inodes.remove(&ino); + let evicted = self.composite.delegated_forget(ino, nlookups).await; + if evicted { self.owner_inodes.remove(&ino); - // Clean up bridge mapping — find which slot, remove. - for slot in &mut self.repos { - slot.bridge.remove_inode_by_left(ino); - } } } async fn statfs(&mut self) -> Result { - Ok(self.icache.statfs()) + Ok(self.composite.delegated_statfs()) } } From 1ce84d54f6efd005b2b069525f60c94ce7498aee Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 14:15:18 -0800 Subject: [PATCH 27/57] chore: add #[must_use] to OrgFs and MesaFS constructors and accessors --- src/fs/mescloud/mod.rs | 1 + src/fs/mescloud/org.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 150b831..07a6001 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -101,6 +101,7 @@ impl MesaFS { const BLOCK_SIZE: u32 = 4096; /// Create a new `MesaFS` instance. + #[must_use] pub fn new(orgs: impl Iterator, fs_owner: (u32, u32)) -> Self { let resolver = MesaResolver { fs_owner, diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 5e5ef02..a8aac36 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -100,6 +100,7 @@ impl OrgFs { const BLOCK_SIZE: u32 = 4096; /// The name of the organization. 
+ #[must_use] pub(crate) fn name(&self) -> &str { &self.name } @@ -185,6 +186,7 @@ impl OrgFs { (ino, attr) } + #[must_use] pub fn new(name: String, client: MesaClient, fs_owner: (u32, u32)) -> Self { let resolver = OrgResolver { fs_owner, From 985bb104ca1ceaf58cb96f221e67de2302fb1b4d Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 14:15:39 -0800 Subject: [PATCH 28/57] uv.lock --- uv.lock | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/uv.lock b/uv.lock index 717a703..f7de7d0 100644 --- a/uv.lock +++ b/uv.lock @@ -68,6 +68,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, ] +[[package]] +name = "execnet" +version = "2.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/89/780e11f9588d9e7128a3f87788354c7946a9cbb1401ad38a48c4db9a4f07/execnet-2.1.2.tar.gz", hash = "sha256:63d83bfdd9a23e35b9c6a3261412324f964c2ec8dcd8d3c6916ee9373e0befcd", size = 166622, upload-time = "2025-11-12T09:56:37.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/84/02fc1827e8cdded4aa65baef11296a9bbe595c474f0d6d758af082d849fd/execnet-2.1.2-py3-none-any.whl", hash = "sha256:67fba928dd5a544b783f6056f449e5e3931a5c378b128bc18501f7ea79e296ec", size = 40708, upload-time = "2025-11-12T09:56:36.333Z" }, +] + [[package]] name = "git-fs-tests" version = "0.0.0" @@ -79,6 +88,7 @@ dev = [ { name = "pytest" }, { name = "pytest-in-docker" }, { name = "pytest-timeout" }, + { name = "pytest-xdist" }, { name = "ruff" }, ] @@ -88,8 +98,9 @@ dev = [ dev = [ { name = "pyright", specifier = ">=1.1.390" }, { name = "pytest", specifier = ">=9.0.2" }, - { name = "pytest-in-docker", specifier = ">=0.2.0" }, + { 
name = "pytest-in-docker", specifier = ">=0.2.1" }, { name = "pytest-timeout", specifier = ">=2.4.0" }, + { name = "pytest-xdist", specifier = ">=3.5.0" }, { name = "ruff", specifier = ">=0.9.0" }, ] @@ -190,17 +201,18 @@ wheels = [ [[package]] name = "pytest-in-docker" -version = "0.2.0" +version = "0.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, { name = "pytest" }, + { name = "pytest-xdist" }, { name = "rpyc" }, { name = "testcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/58/a0/c17c7e77c6c10c07036f38a04972dfce62cdddef42ddf9ddfe831a244e78/pytest_in_docker-0.2.0.tar.gz", hash = "sha256:20be61a5669b6c91577079fa7a6b59e7a6cd5a106177f6b3ab8e4b19e63310fb", size = 3793573, upload-time = "2026-02-09T22:26:40.489Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/ce/dfb5a8cb7dfb317b5cf27bc03c91b9f5b9d0537da893369a78dc8219d0ac/pytest_in_docker-0.2.1.tar.gz", hash = "sha256:5f25ceb12eb98a495c1f9f4764a73dd7e86b1213e6f1324f8f9c222ae34a00b5", size = 3794359, upload-time = "2026-02-09T23:53:08.471Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c5/2b/f32da99dfb22cf2af891fe810d9952d600de15a03bb652f67c96cf0a21c3/pytest_in_docker-0.2.0-py3-none-any.whl", hash = "sha256:e793efaa917c5a7f5d730ba2dbd8ffebc77e0aca556dfec958029b8f3fae9f7d", size = 12618, upload-time = "2026-02-09T22:26:38.738Z" }, + { url = "https://files.pythonhosted.org/packages/2f/14/180cf34e1ccb81aef0e683b1d2db3c1e292cf886eb56316b229c6f7e6f3d/pytest_in_docker-0.2.1-py3-none-any.whl", hash = "sha256:a903c3d903985fb6d446eb184a58e6fea725d49f916c18ce2762375b037c85e1", size = 12072, upload-time = "2026-02-09T23:53:06.822Z" }, ] [[package]] @@ -215,6 +227,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 
14382, upload-time = "2025-05-05T19:44:33.502Z" }, ] +[[package]] +name = "pytest-xdist" +version = "3.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" }, +] + [[package]] name = "python-dotenv" version = "1.2.1" From 32639e6cebfc45ae021b4e162fefa8c9b3f59afb Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 14:53:28 -0800 Subject: [PATCH 29/57] chore: remove dead code from icache refactoring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused methods, types, and files accumulated during the readdir-icache-caching refactoring — recoverable from git history. --- src/fs/icache/async_cache.rs | 44 ---- src/fs/icache/cache.rs | 68 ------ src/fs/icache/mod.rs | 2 - src/fs/local.rs | 454 ----------------------------------- src/fs/mescloud/icache.rs | 32 --- src/fs/mescloud/org.rs | 12 - src/fs/mod.rs | 1 - 7 files changed, 613 deletions(-) delete mode 100644 src/fs/icache/cache.rs delete mode 100644 src/fs/local.rs diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index c5606fa..024a909 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -116,16 +116,6 @@ impl AsyncICache { self.inode_table.contains_sync(&ino) } - /// Check whether `ino` is fully resolved (`Available`). 
- /// - /// Returns `false` if the entry is missing **or** still `InFlight`. - /// This is a non-blocking, synchronous check. - pub fn contains_resolved(&self, ino: Inode) -> bool { - self.inode_table - .read_sync(&ino, |_, s| matches!(s, IcbState::Available(_))) - .unwrap_or(false) - } - /// Read an ICB via closure. **Awaits** if `InFlight`. /// Returns `None` if `ino` doesn't exist. pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&R::Icb) -> T) -> Option { @@ -475,36 +465,6 @@ mod tests { assert!(!cache.contains(999), "missing inode should not exist"); } - #[tokio::test] - async fn contains_resolved_returns_true_for_root() { - let cache = test_cache(); - assert!(cache.contains_resolved(1), "root should be resolved"); - } - - #[tokio::test] - async fn contains_resolved_returns_false_for_missing() { - let cache = test_cache(); - assert!( - !cache.contains_resolved(999), - "missing inode should not be resolved" - ); - } - - #[tokio::test] - async fn contains_resolved_returns_false_for_inflight() { - let cache = test_cache(); - let (_tx, rx) = watch::channel(()); - cache - .inode_table - .upsert_async(42, IcbState::InFlight(rx)) - .await; - assert!(cache.contains(42), "InFlight entry should exist"); - assert!( - !cache.contains_resolved(42), - "InFlight entry should not be resolved" - ); - } - #[tokio::test] async fn contains_after_resolver_completes() { let resolver = TestResolver::new(); @@ -820,10 +780,6 @@ mod tests { drop(tx); assert!(cache.contains(42), "entry should exist in table"); - assert!( - cache.contains_resolved(42), - "should be resolved after insert_icb overwrote InFlight" - ); } // -- get_or_resolve tests -- diff --git a/src/fs/icache/cache.rs b/src/fs/icache/cache.rs deleted file mode 100644 index a3818f9..0000000 --- a/src/fs/icache/cache.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Generic inode table with reference counting. 
- -use std::collections::HashMap; - -use tracing::{trace, warn}; - -use crate::fs::r#trait::Inode; - -use super::IcbLike; - -/// Generic directory cache. -/// -/// Owns an inode table. Provides reference counting and ICB lookup/insertion. -pub struct ICache { - inode_table: HashMap, -} - -impl ICache { - /// Create a new `ICache` with a root ICB at `root_ino` (rc=1). - pub fn new(root_ino: Inode, root_path: impl Into) -> Self { - let mut inode_table = HashMap::new(); - inode_table.insert(root_ino, I::new_root(root_path.into())); - Self { inode_table } - } - - pub fn get_icb(&self, ino: Inode) -> Option<&I> { - self.inode_table.get(&ino) - } - - pub fn get_icb_mut(&mut self, ino: Inode) -> Option<&mut I> { - self.inode_table.get_mut(&ino) - } - - pub fn contains(&self, ino: Inode) -> bool { - self.inode_table.contains_key(&ino) - } - - /// Insert an ICB only if absent. - /// Returns a mutable reference to the (possibly pre-existing) ICB. - pub fn entry_or_insert_icb(&mut self, ino: Inode, f: impl FnOnce() -> I) -> &mut I { - self.inode_table.entry(ino).or_insert_with(f) - } - - /// Number of inodes in the table. - pub fn inode_count(&self) -> usize { - self.inode_table.len() - } - - /// Decrement rc by `nlookups`. Returns `Some(evicted_icb)` if the inode was evicted. 
- pub fn forget(&mut self, ino: Inode, nlookups: u64) -> Option { - match self.inode_table.entry(ino) { - std::collections::hash_map::Entry::Occupied(mut entry) => { - if entry.get().rc() <= nlookups { - trace!(ino, "evicting inode"); - Some(entry.remove()) - } else { - *entry.get_mut().rc_mut() -= nlookups; - trace!(ino, new_rc = entry.get().rc(), "decremented rc"); - None - } - } - std::collections::hash_map::Entry::Vacant(_) => { - warn!(ino, "forget on unknown inode"); - None - } - } - } -} diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index 41d94ae..20fa221 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ -2,13 +2,11 @@ pub mod async_cache; pub mod bridge; -mod cache; mod file_table; mod inode_factory; pub use async_cache::AsyncICache; pub use async_cache::IcbResolver; -pub use cache::ICache; pub use file_table::FileTable; pub use inode_factory::InodeFactory; diff --git a/src/fs/local.rs b/src/fs/local.rs deleted file mode 100644 index 78ff12b..0000000 --- a/src/fs/local.rs +++ /dev/null @@ -1,454 +0,0 @@ -//! An implementation of a filesystem that directly overlays the host filesystem. 
-use bytes::Bytes; -use nix::sys::statvfs::statvfs; -use std::{collections::HashMap, path::PathBuf}; -use thiserror::Error; -use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _}; - -use std::ffi::OsStr; -use tracing::warn; - -use crate::fs::icache::{FileTable, ICache, IcbLike}; -use crate::fs::r#trait::{ - DirEntry, FileAttr, FileHandle, FileOpenOptions, FilesystemStats, Fs, Inode, LockOwner, - OpenFile, OpenFlags, -}; - -#[derive(Debug, Error)] -pub enum LookupError { - #[error("inode not found")] - InodeNotFound, - #[error("io error: {0}")] - Io(#[from] std::io::Error), - - #[error("invalid file type")] - InvalidFileType, -} - -impl From for i32 { - fn from(e: LookupError) -> Self { - match e { - LookupError::InodeNotFound => libc::ENOENT, - LookupError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), - LookupError::InvalidFileType => libc::EINVAL, - } - } -} - -#[derive(Debug, Error)] -pub enum GetAttrError { - #[error("inode not found")] - InodeNotFound, - - #[error("io error: {0}")] - Io(#[from] std::io::Error), - - #[error("invalid file type")] - InvalidFileType, -} - -impl From for i32 { - fn from(e: GetAttrError) -> Self { - match e { - GetAttrError::InodeNotFound => libc::ENOENT, - GetAttrError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), - GetAttrError::InvalidFileType => libc::EINVAL, - } - } -} - -#[derive(Debug, Error)] -pub enum OpenError { - #[error("inode not found")] - InodeNotFound, - - #[error("io error: {0}")] - Io(#[from] std::io::Error), -} - -impl From for i32 { - fn from(e: OpenError) -> Self { - match e { - OpenError::InodeNotFound => libc::ENOENT, - OpenError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), - } - } -} - -#[derive(Debug, Error)] -pub enum ReadError { - #[error("inode not found")] - InodeNotFound, - - #[error("file not open")] - FileNotOpen, - - #[error("io error: {0}")] - Io(#[from] std::io::Error), -} - -impl From for i32 { - fn from(e: ReadError) -> Self { - match e { - 
ReadError::InodeNotFound => libc::ENOENT, - ReadError::FileNotOpen => libc::EBADF, - ReadError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), - } - } -} - -#[derive(Debug, Error)] -pub enum ReleaseError { - #[error("file not open")] - FileNotOpen, -} - -impl From for i32 { - fn from(e: ReleaseError) -> Self { - match e { - ReleaseError::FileNotOpen => libc::EBADF, - } - } -} - -#[derive(Debug, Error)] -pub enum ReadDirError { - #[error("inode not found")] - InodeNotFound, - - #[error("io error: {0}")] - Io(#[from] std::io::Error), - - #[error("invalid file type")] - InvalidFileType, -} - -impl From for i32 { - fn from(e: ReadDirError) -> Self { - match e { - ReadDirError::InodeNotFound => libc::ENOENT, - ReadDirError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), - ReadDirError::InvalidFileType => libc::EINVAL, - } - } -} - -struct InodeControlBlock { - pub rc: u64, - pub path: PathBuf, - pub children: Option>, -} - -impl IcbLike for InodeControlBlock { - fn new_root(path: PathBuf) -> Self { - Self { - rc: 1, - path, - children: None, - } - } - - fn rc(&self) -> u64 { - self.rc - } - - fn rc_mut(&mut self) -> &mut u64 { - &mut self.rc - } - - fn needs_resolve(&self) -> bool { - false // local FS entries are always fully resolved - } -} - -pub struct LocalFs { - icache: ICache, - file_table: FileTable, - open_files: HashMap, -} - -impl LocalFs { - #[expect(dead_code, reason = "alternative filesystem implementation")] - pub fn new(abs_path: impl Into) -> Self { - Self { - icache: ICache::new(1, abs_path), - file_table: FileTable::new(), - open_files: HashMap::new(), - } - } - - fn abspath(&self) -> &PathBuf { - &self - .icache - .get_icb(1) - .unwrap_or_else(|| unreachable!("root inode 1 must always exist in inode_table")) - .path - } - - async fn parse_tokio_dirent( - dir_entry: &tokio::fs::DirEntry, - ) -> Result { - return Ok(DirEntry { - ino: dir_entry.ino(), - name: dir_entry.file_name(), - kind: 
dir_entry.file_type().await?.try_into().map_err(|()| { - tokio::io::Error::new( - tokio::io::ErrorKind::InvalidData, - "invalid file type in directory entry", - ) - })?, - }); - } -} - -#[async_trait::async_trait] -impl Fs for LocalFs { - type LookupError = LookupError; - type GetAttrError = GetAttrError; - type OpenError = OpenError; - type ReadError = ReadError; - type ReaddirError = ReadDirError; - type ReleaseError = ReleaseError; - - async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - debug_assert!( - self.icache.contains(parent), - "parent inode {parent} not in inode_table" - ); - let parent_icb = self.icache.get_icb(parent).ok_or_else(|| { - warn!( - "Lookup called on unknown parent inode {}. This is a programming bug", - parent - ); - LookupError::InodeNotFound - })?; - - let child_path = parent_icb.path.join(name); - let meta = tokio::fs::metadata(&child_path) - .await - .map_err(LookupError::Io)?; - - let file_attr = FileAttr::try_from(meta).map_err(|()| LookupError::InvalidFileType); - debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); - let file_attr = file_attr?; - - let icb = self - .icache - .entry_or_insert_icb(file_attr.common().ino, || InodeControlBlock { - rc: 0, - path: child_path, - children: None, - }); - *icb.rc_mut() += 1; - - Ok(file_attr) - } - - async fn getattr( - &mut self, - ino: Inode, - fh: Option, - ) -> Result { - if let Some(fh) = fh { - // The file was already opened, we can just call fstat. - debug_assert!( - self.open_files.contains_key(&fh), - "file handle {fh} not in open_files" - ); - let file = self.open_files.get(&fh).ok_or_else(|| { - warn!( - "GetAttr called on unknown file handle {}. 
This is a programming bug", - fh - ); - GetAttrError::InodeNotFound - })?; - - let meta = file.metadata().await.map_err(GetAttrError::Io)?; - let file_attr = FileAttr::try_from(meta).map_err(|()| GetAttrError::InvalidFileType); - debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); - - Ok(file_attr?) - } else { - // No open path, so we have to do a painful stat on the path. - debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); - let icb = self.icache.get_icb(ino).ok_or_else(|| { - warn!( - "GetAttr called on unknown inode {}. This is a programming bug", - ino - ); - GetAttrError::InodeNotFound - })?; - - let meta = tokio::fs::metadata(&icb.path) - .await - .map_err(GetAttrError::Io)?; - let file_attr = FileAttr::try_from(meta).map_err(|()| GetAttrError::InvalidFileType); - debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); - - Ok(file_attr?) - } - } - - async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); - - let inode_cb = self.icache.get_icb(ino).ok_or_else(|| { - warn!( - parent = ino, - "Readdir of unknown parent inode. Programming bug" - ); - ReadDirError::InodeNotFound - })?; - - let mut read_dir = tokio::fs::read_dir(&inode_cb.path) - .await - .map_err(ReadDirError::Io)?; - - // Note that we HAVE to re-read all entries here, since there's really no way for us to - // know whether another process has modified the underlying directory, without our consent. - // - // TODO(markovejnovic): If we can guarantee that only our process has access to the - // underlying directory, we can avoid re-loading the entries every time. - // - // Two mechanisms appear to exist: namespace mount and/or file permissions. - // - // However, both of these mechanisms take time to develop and we don't have time. 
- let mut entries: Vec = Vec::new(); - while let Some(dir_entry) = read_dir.next_entry().await.map_err(ReadDirError::Io)? { - entries.push(Self::parse_tokio_dirent(&dir_entry).await?); - } - - let inode_cb = self.icache.get_icb_mut(ino).ok_or_else(|| { - warn!(parent = ino, "inode disappeared. TOCTOU programming bug"); - ReadDirError::InodeNotFound - })?; - - Ok(inode_cb.children.insert(entries)) - } - - async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); - let icb = self.icache.get_icb(ino).ok_or_else(|| { - warn!( - "Open called on unknown inode {}. This is a programming bug", - ino - ); - OpenError::InodeNotFound - })?; - - // TODO(markovejnovic): Not all flags are supported here. We could do better. - let file = tokio::fs::OpenOptions::new() - .read(true) - .write(flags.contains(OpenFlags::RDWR) || flags.contains(OpenFlags::WRONLY)) - .append(flags.contains(OpenFlags::APPEND)) - .truncate(flags.contains(OpenFlags::TRUNC)) - .create(flags.contains(OpenFlags::CREAT)) - .open(&icb.path) - .await - .map_err(OpenError::Io)?; - - // Generate a new file handle. - let fh = self.file_table.allocate(); - self.open_files.insert(fh, file); - - Ok(OpenFile { - handle: fh, - // TODO(markovejnovic): Might be interesting to set some of these options. - options: FileOpenOptions::empty(), - }) - } - - async fn read( - &mut self, - ino: Inode, - fh: FileHandle, - offset: u64, - size: u32, - _flags: OpenFlags, - _lock_owner: Option, - ) -> Result { - // TODO(markovejnovic): Respect flags and lock_owner. - debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); - debug_assert!( - self.open_files.contains_key(&fh), - "file handle {fh} not in open_files" - ); - - let file: &mut tokio::fs::File = self.open_files.get_mut(&fh).ok_or_else(|| { - warn!( - "Read called on unknown file handle {}. 
This is a programming bug", - fh - ); - ReadError::FileNotOpen - })?; - - let mut buffer = vec![0u8; size as usize]; - file.seek(std::io::SeekFrom::Start(offset)) - .await - .map_err(ReadError::Io)?; - let nbytes = file.read(&mut buffer).await.map_err(ReadError::Io)?; - - buffer.truncate(nbytes); - Ok(Bytes::from(buffer)) - } - - async fn release( - &mut self, - _ino: Inode, - fh: FileHandle, - _flags: OpenFlags, - _flush: bool, - ) -> Result<(), ReleaseError> { - self.open_files.remove(&fh).ok_or_else(|| { - warn!( - "Release called on unknown file handle {}. Programming bug", - fh - ); - ReleaseError::FileNotOpen - })?; - Ok(()) - } - - async fn forget(&mut self, ino: Inode, nlookups: u64) { - debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); - - self.icache.forget(ino, nlookups); - } - - async fn statfs(&mut self) -> Result { - let stat = statvfs(self.abspath().as_path())?; - - Ok(FilesystemStats { - block_size: stat.block_size().try_into().map_err(|_| { - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "block size too large to fit into u32", - ) - })?, - fragment_size: stat.fragment_size(), - #[allow(clippy::allow_attributes)] - #[allow(clippy::useless_conversion)] - total_blocks: u64::from(stat.blocks()), - #[allow(clippy::allow_attributes)] - #[allow(clippy::useless_conversion)] - free_blocks: u64::from(stat.blocks_free()), - #[allow(clippy::allow_attributes)] - #[allow(clippy::useless_conversion)] - available_blocks: u64::from(stat.blocks_available()), - total_inodes: self.icache.inode_count() as u64, - #[allow(clippy::allow_attributes)] - #[allow(clippy::useless_conversion)] - free_inodes: u64::from(stat.files_free()), - #[allow(clippy::allow_attributes)] - #[allow(clippy::useless_conversion)] - available_inodes: u64::from(stat.files_available()), - filesystem_id: 0, - mount_flags: 0, - #[expect( - clippy::cast_possible_truncation, - reason = "max filename length always fits in u32" - )] - max_filename_length: 
stat.name_max() as u32, - }) - } -} diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index e2231a0..9751dec 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -111,11 +111,6 @@ impl> MescloudICache { self.inner.contains(ino) } - #[expect(dead_code, reason = "public API method for future use")] - pub fn contains_resolved(&self, ino: Inode) -> bool { - self.inner.contains_resolved(ino) - } - pub async fn get_icb( &self, ino: Inode, @@ -124,15 +119,6 @@ impl> MescloudICache { self.inner.get_icb(ino, f).await } - #[expect(dead_code, reason = "public API method for future use")] - pub async fn get_icb_mut( - &self, - ino: Inode, - f: impl FnOnce(&mut InodeControlBlock) -> T, - ) -> Option { - self.inner.get_icb_mut(ino, f).await - } - pub async fn insert_icb(&self, ino: Inode, icb: InodeControlBlock) { self.inner.insert_icb(ino, icb).await; } @@ -162,18 +148,6 @@ impl> MescloudICache { self.inner.get_or_resolve(ino, then).await } - // -- Delegated (sync) -- - - #[expect(dead_code, reason = "public API method for future use")] - pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { - self.inner.for_each(f); - } - - #[expect(dead_code, reason = "public API method for future use")] - pub fn inode_count(&self) -> usize { - self.inner.inode_count() - } - // -- Domain-specific -- /// Allocate a new inode number. @@ -249,12 +223,6 @@ impl> MescloudICache { .await; ino } - - /// Direct access to the inner async cache for resolvers that need it. - #[expect(dead_code, reason = "public API method for future use")] - pub fn inner(&self) -> &AsyncICache { - &self.inner - } } #[cfg(test)] diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index a8aac36..12f8a44 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -111,18 +111,6 @@ impl OrgFs { self.name == "github" } - /// Decode a base64-encoded repo name from the API. Returns "owner/repo". - /// TODO(MES-674): Cleanup "special" casing for github. 
- #[expect(dead_code)] - fn decode_github_repo_name(encoded: &str) -> Option { - use base64::Engine as _; - let bytes = base64::engine::general_purpose::STANDARD - .decode(encoded) - .ok()?; - let decoded = String::from_utf8(bytes).ok()?; - decoded.contains('/').then_some(decoded) - } - /// Encode "owner/repo" to base64 for API calls. /// TODO(MES-674): Cleanup "special" casing for github. fn encode_github_repo_name(decoded: &str) -> String { diff --git a/src/fs/mod.rs b/src/fs/mod.rs index ef40322..003e1b0 100644 --- a/src/fs/mod.rs +++ b/src/fs/mod.rs @@ -1,5 +1,4 @@ pub mod fuser; pub mod icache; -pub mod local; pub mod mescloud; pub mod r#trait; From f3ae87c736f9be034074fd1738e0bdfbf2964e1c Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 15:23:28 -0800 Subject: [PATCH 30/57] chore: add explicit span names to all #[instrument] attributes Use `name = "Struct::method"` so indicatif spinners show the owning type, not just the bare function name. --- src/fs/fuser.rs | 19 +++++++++++-------- src/fs/mescloud/mod.rs | 14 +++++++------- src/fs/mescloud/org.rs | 14 +++++++------- src/fs/mescloud/repo.rs | 14 +++++++------- 4 files changed, 32 insertions(+), 29 deletions(-) diff --git a/src/fs/fuser.rs b/src/fs/fuser.rs index 7fae3df..86ddabb 100644 --- a/src/fs/fuser.rs +++ b/src/fs/fuser.rs @@ -154,7 +154,7 @@ where F::ReaddirError: Into, F::ReleaseError: Into, { - #[instrument(skip(self, _req, reply))] + #[instrument(name = "FuserAdapter::lookup", skip(self, _req, reply))] fn lookup( &mut self, _req: &fuser::Request<'_>, @@ -178,7 +178,7 @@ where } } - #[instrument(skip(self, _req, fh, reply))] + #[instrument(name = "FuserAdapter::getattr", skip(self, _req, fh, reply))] fn getattr( &mut self, _req: &fuser::Request<'_>, @@ -198,7 +198,7 @@ where } } - #[instrument(skip(self, _req, _fh, offset, reply))] + #[instrument(name = "FuserAdapter::readdir", skip(self, _req, _fh, offset, reply))] fn readdir( &mut self, _req: &fuser::Request<'_>, @@ -243,7 
+243,7 @@ where reply.ok(); } - #[instrument(skip(self, _req, flags, reply))] + #[instrument(name = "FuserAdapter::open", skip(self, _req, flags, reply))] fn open(&mut self, _req: &fuser::Request<'_>, ino: u64, flags: i32, reply: fuser::ReplyOpen) { match self.runtime.block_on(self.fs.open(ino, flags.into())) { Ok(open_file) => { @@ -257,7 +257,10 @@ where } } - #[instrument(skip(self, _req, fh, offset, size, flags, lock_owner, reply))] + #[instrument( + name = "FuserAdapter::read", + skip(self, _req, fh, offset, size, flags, lock_owner, reply) + )] fn read( &mut self, _req: &fuser::Request<'_>, @@ -290,7 +293,7 @@ where } } - #[instrument(skip(self, _req, _lock_owner, reply))] + #[instrument(name = "FuserAdapter::release", skip(self, _req, _lock_owner, reply))] fn release( &mut self, _req: &fuser::Request<'_>, @@ -316,12 +319,12 @@ where } } - #[instrument(skip(self, _req, nlookup))] + #[instrument(name = "FuserAdapter::forget", skip(self, _req, nlookup))] fn forget(&mut self, _req: &fuser::Request<'_>, ino: u64, nlookup: u64) { self.runtime.block_on(self.fs.forget(ino, nlookup)); } - #[instrument(skip(self, _req, _ino, reply))] + #[instrument(name = "FuserAdapter::statfs", skip(self, _req, _ino, reply))] fn statfs(&mut self, _req: &fuser::Request<'_>, _ino: u64, reply: fuser::ReplyStatfs) { self.runtime.block_on(async { match self.fs.statfs().await { diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 07a6001..cf887e2 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -245,7 +245,7 @@ impl Fs for MesaFS { type ReaddirError = ReadDirError; type ReleaseError = ReleaseError; - #[instrument(skip(self))] + #[instrument(name = "MesaFS::lookup", skip(self))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { match self.inode_role(parent).await { InodeRole::Root => { @@ -267,7 +267,7 @@ impl Fs for MesaFS { } } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::getattr", skip(self))] async fn getattr( &mut self, 
ino: Inode, @@ -276,7 +276,7 @@ impl Fs for MesaFS { self.composite.delegated_getattr(ino).await } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::readdir", skip(self))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { match self.inode_role(ino).await { InodeRole::Root => { @@ -306,12 +306,12 @@ impl Fs for MesaFS { } } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::open", skip(self))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { self.composite.delegated_open(ino, flags).await } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::read", skip(self))] async fn read( &mut self, ino: Inode, @@ -326,7 +326,7 @@ impl Fs for MesaFS { .await } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::release", skip(self))] async fn release( &mut self, ino: Inode, @@ -339,7 +339,7 @@ impl Fs for MesaFS { .await } - #[instrument(skip(self))] + #[instrument(name = "MesaFS::forget", skip(self))] async fn forget(&mut self, ino: Inode, nlookups: u64) { // MesaFS has no extra state to clean up on eviction (unlike OrgFs::owner_inodes). 
let _ = self.composite.delegated_forget(ino, nlookups).await; diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 12f8a44..350fdc8 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -346,7 +346,7 @@ impl Fs for OrgFs { type ReaddirError = ReadDirError; type ReleaseError = ReleaseError; - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::lookup", skip(self), fields(org = %self.name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { match self.inode_role(parent).await { InodeRole::OrgRoot => { @@ -406,7 +406,7 @@ impl Fs for OrgFs { } } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::getattr", skip(self), fields(org = %self.name))] async fn getattr( &mut self, ino: Inode, @@ -415,7 +415,7 @@ impl Fs for OrgFs { self.composite.delegated_getattr(ino).await } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::readdir", skip(self), fields(org = %self.name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { match self.inode_role(ino).await { InodeRole::OrgRoot => { @@ -468,12 +468,12 @@ impl Fs for OrgFs { } } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::open", skip(self), fields(org = %self.name))] async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { self.composite.delegated_open(ino, flags).await } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::read", skip(self), fields(org = %self.name))] async fn read( &mut self, ino: Inode, @@ -488,7 +488,7 @@ impl Fs for OrgFs { .await } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::release", skip(self), fields(org = %self.name))] async fn release( &mut self, ino: Inode, @@ -501,7 +501,7 @@ impl Fs for OrgFs { .await } - #[instrument(skip(self), fields(org = %self.name))] + #[instrument(name = "OrgFs::forget", skip(self), 
fields(org = %self.name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { let evicted = self.composite.delegated_forget(ino, nlookups).await; if evicted { diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 23737ab..0d5673f 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -246,7 +246,7 @@ impl Fs for RepoFs { type ReaddirError = ReadDirError; type ReleaseError = ReleaseError; - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::lookup", skip(self), fields(repo = %self.repo_name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { debug_assert!( self.icache.contains(parent), @@ -266,7 +266,7 @@ impl Fs for RepoFs { Ok(attr) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::getattr", skip(self), fields(repo = %self.repo_name))] async fn getattr( &mut self, ino: Inode, @@ -278,7 +278,7 @@ impl Fs for RepoFs { }) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::readdir", skip(self), fields(repo = %self.repo_name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { debug_assert!( self.icache.contains(ino), @@ -349,7 +349,7 @@ impl Fs for RepoFs { Ok(&self.readdir_buf) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::open", skip(self), fields(repo = %self.repo_name))] async fn open(&mut self, ino: Inode, _flags: OpenFlags) -> Result { if !self.icache.contains(ino) { warn!(ino, "open on unknown inode"); @@ -371,7 +371,7 @@ impl Fs for RepoFs { }) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::read", skip(self), fields(repo = %self.repo_name))] async fn read( &mut self, ino: Inode, @@ -427,7 +427,7 @@ impl Fs for RepoFs { Ok(Bytes::copy_from_slice(&decoded[start..end])) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = 
"RepoFs::release", skip(self), fields(repo = %self.repo_name))] async fn release( &mut self, ino: Inode, @@ -447,7 +447,7 @@ impl Fs for RepoFs { Ok(()) } - #[instrument(skip(self), fields(repo = %self.repo_name))] + #[instrument(name = "RepoFs::forget", skip(self), fields(repo = %self.repo_name))] async fn forget(&mut self, ino: Inode, nlookups: u64) { debug_assert!( self.icache.contains(ino), From 7d09f409206c4ae3ec5fde28ce16e2fd0a1df814 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 17:55:31 -0800 Subject: [PATCH 31/57] more telemetry --- Cargo.lock | 196 +++++++++++++++++++++++++++++++++++ Cargo.toml | 6 +- src/fs/icache/async_cache.rs | 10 +- src/fs/mescloud/composite.rs | 11 +- src/fs/mescloud/mod.rs | 3 +- src/fs/mescloud/org.rs | 3 +- src/fs/mescloud/repo.rs | 5 +- src/trc.rs | 54 ++++++++-- 8 files changed, 275 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82b64b1..da700ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -513,6 +513,12 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "encode_unicode" version = "1.0.0" @@ -766,6 +772,9 @@ dependencies = [ "mesa-dev", "nix", "num-traits", + "opentelemetry", + "opentelemetry-otlp", + "opentelemetry_sdk", "rand", "reqwest", "reqwest-middleware", @@ -783,10 +792,17 @@ dependencies = [ "toml", "tracing", "tracing-indicatif", + "tracing-opentelemetry", "tracing-subscriber", "vergen-gitcl", ] +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + [[package]] name = "h2" version = "0.4.13" @@ -1182,6 +1198,15 
@@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.17" @@ -1464,6 +1489,84 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "opentelemetry" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e87237e2775f74896f9ad219d26a2081751187eb7c9f5c58dde20a23b95d16c" +dependencies = [ + "futures-core", + "futures-sink", + "js-sys", + "pin-project-lite", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "opentelemetry-http" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46d7ab32b827b5b495bd90fa95a6cb65ccc293555dcc3199ae2937d2d237c8ed" +dependencies = [ + "async-trait", + "bytes", + "http", + "opentelemetry", + "reqwest", + "tracing", +] + +[[package]] +name = "opentelemetry-otlp" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d899720fe06916ccba71c01d04ecd77312734e2de3467fd30d9d580c8ce85656" +dependencies = [ + "futures-core", + "http", + "opentelemetry", + "opentelemetry-http", + "opentelemetry-proto", + "opentelemetry_sdk", + "prost", + "reqwest", + "thiserror 2.0.18", + "tracing", +] + +[[package]] +name = "opentelemetry-proto" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c40da242381435e18570d5b9d50aca2a4f4f4d8e146231adb4e7768023309b3" +dependencies = [ + "opentelemetry", + "opentelemetry_sdk", + "prost", + "tonic", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"afdefb21d1d47394abc1ba6c57363ab141be19e27cc70d0e422b7f303e4d290b" +dependencies = [ + "futures-channel", + "futures-executor", + "futures-util", + "glob", + "opentelemetry", + "percent-encoding", + "rand", + "serde_json", + "thiserror 2.0.18", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "option-ext" version = "0.2.0" @@ -1509,6 +1612,26 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -1576,6 +1699,29 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "quick-xml" version = "0.37.5" @@ -2466,6 +2612,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" 
+dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + [[package]] name = "tokio-util" version = "0.7.18" @@ -2518,6 +2675,27 @@ version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-trait", + "base64", + "bytes", + "http", + "http-body", + "http-body-util", + "percent-encoding", + "pin-project", + "prost", + "tokio-stream", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.5.3" @@ -2618,6 +2796,24 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-opentelemetry" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd8e764bd6f5813fd8bebc3117875190c5b0415be8f7f8059bffb6ecd979c444" +dependencies = [ + "js-sys", + "once_cell", + "opentelemetry", + "opentelemetry_sdk", + "smallvec", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", + "web-time", +] + [[package]] name = "tracing-subscriber" version = "0.3.22" diff --git a/Cargo.toml b/Cargo.toml index 3c0f8cf..7798fd7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ clap = { version = "4.5.54", features = ["derive", "env"] } tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt"] } fuser = { version = "0.16.0", features = ["libfuse"] } libc = "0.2" -mesa-dev = "1.8.0" +mesa-dev = "1.11.0" num-traits = "0.2" reqwest = { version = "0.12", default-features = false } reqwest-middleware = "0.4" @@ -42,6 +42,10 @@ semver = "1.0" shellexpand = "3.1" inquire = "0.9.2" tracing-indicatif = "0.3.14" +opentelemetry = "0.29" +opentelemetry_sdk = { version = "0.29", features = ["rt-tokio"] } +opentelemetry-otlp = { version = 
"0.29", features = ["http-proto", "trace", "reqwest-blocking-client"] } +tracing-opentelemetry = "0.30" [features] default = [] diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 024a909..889e5a1 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -5,7 +5,7 @@ use std::future::Future; use scc::HashMap as ConcurrentHashMap; use tokio::sync::watch; -use tracing::{trace, warn}; +use tracing::{instrument, trace, warn}; use crate::fs::r#trait::Inode; @@ -82,6 +82,7 @@ impl AsyncICache { /// Wait until `ino` is `Available`. /// Returns `true` if the entry exists and is Available, /// `false` if the entry does not exist. + #[instrument(name = "AsyncICache::wait_for_available", skip(self))] async fn wait_for_available(&self, ino: Inode) -> bool { let rx = self .inode_table @@ -118,6 +119,7 @@ impl AsyncICache { /// Read an ICB via closure. **Awaits** if `InFlight`. /// Returns `None` if `ino` doesn't exist. + #[instrument(name = "AsyncICache::get_icb", skip(self, f))] pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&R::Icb) -> T) -> Option { if !self.wait_for_available(ino).await { return None; @@ -133,6 +135,7 @@ impl AsyncICache { /// Mutate an ICB via closure. **Awaits** if `InFlight`. /// Returns `None` if `ino` doesn't exist. + #[instrument(name = "AsyncICache::get_icb_mut", skip(self, f))] pub async fn get_icb_mut(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { if !self.wait_for_available(ino).await { return None; @@ -147,6 +150,7 @@ impl AsyncICache { } /// Insert an ICB directly as `Available` (overwrites any existing entry). + #[instrument(name = "AsyncICache::insert_icb", skip(self, icb))] pub async fn insert_icb(&self, ino: Inode, icb: R::Icb) { self.inode_table .upsert_async(ino, IcbState::Available(icb)) @@ -158,6 +162,7 @@ impl AsyncICache { /// /// Both `factory` and `then` are `FnOnce` — wrapped in `Option` internally /// to satisfy the borrow checker across the await-loop. 
+ #[instrument(name = "AsyncICache::entry_or_insert_icb", skip(self, factory, then))] pub async fn entry_or_insert_icb( &self, ino: Inode, @@ -202,6 +207,7 @@ impl AsyncICache { /// /// Returns `Err(R::Error)` if resolution fails. On error the `InFlight` /// entry is removed so subsequent calls can retry. + #[instrument(name = "AsyncICache::get_or_resolve", skip(self, then))] pub async fn get_or_resolve( &self, ino: Inode, @@ -294,6 +300,7 @@ impl AsyncICache { } /// Increment rc. **Awaits** `InFlight`. Panics if inode is missing. + #[instrument(name = "AsyncICache::inc_rc", skip(self))] pub async fn inc_rc(&self, ino: Inode) -> u64 { self.wait_for_available(ino).await; self.inode_table @@ -310,6 +317,7 @@ impl AsyncICache { /// Decrement rc by `nlookups`. If rc drops to zero, evicts and returns /// the ICB. **Awaits** `InFlight` entries. + #[instrument(name = "AsyncICache::forget", skip(self))] pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { if !self.wait_for_available(ino).await { warn!(ino, "forget on unknown inode"); diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index bae5b46..69d5f32 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use std::ffi::OsStr; use bytes::Bytes; -use tracing::{trace, warn}; +use tracing::{instrument, trace, warn}; use crate::fs::icache::bridge::HashMapBridge; use crate::fs::icache::{FileTable, IcbResolver}; @@ -53,6 +53,7 @@ where + Sync, { /// Walk the parent chain to find which child slot owns an inode. + #[instrument(name = "CompositeFs::slot_for_inode", skip(self))] pub async fn slot_for_inode(&self, ino: Inode) -> Option { if let Some(&idx) = self.child_inodes.get(&ino) { return Some(idx); @@ -81,6 +82,7 @@ where /// Translate an inner inode to an outer inode, allocating if needed. /// Also inserts a stub ICB into the outer icache when the inode is new. 
+ #[instrument(name = "CompositeFs::translate_inner_ino", skip(self, name))] pub async fn translate_inner_ino( &mut self, slot_idx: usize, @@ -108,6 +110,7 @@ where } /// Get cached file attributes for an inode. + #[instrument(name = "CompositeFs::delegated_getattr", skip(self))] pub async fn delegated_getattr(&self, ino: Inode) -> Result { self.icache.get_attr(ino).await.ok_or_else(|| { warn!(ino, "getattr on unknown inode"); @@ -116,6 +119,7 @@ where } /// Find slot, forward inode, delegate to inner, allocate outer file handle. + #[instrument(name = "CompositeFs::delegated_open", skip(self))] pub async fn delegated_open( &mut self, ino: Inode, @@ -144,6 +148,7 @@ where /// Find slot, forward inode and file handle, delegate read to inner. #[expect(clippy::too_many_arguments, reason = "mirrors fuser read API")] + #[instrument(name = "CompositeFs::delegated_read", skip(self))] pub async fn delegated_read( &mut self, ino: Inode, @@ -172,6 +177,7 @@ where /// Find slot, forward inode and file handle, delegate release to inner, /// then clean up the file handle mapping. + #[instrument(name = "CompositeFs::delegated_release", skip(self))] pub async fn delegated_release( &mut self, ino: Inode, @@ -202,6 +208,7 @@ where /// Propagate forget to the inner filesystem, evict from icache, and clean /// up bridge mappings. Returns `true` if the inode was evicted. #[must_use] + #[instrument(name = "CompositeFs::delegated_forget", skip(self))] pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { if let Some(idx) = self.slot_for_inode(ino).await && let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) @@ -226,6 +233,7 @@ where } /// Delegation branch for lookup when the parent is owned by a child slot. + #[instrument(name = "CompositeFs::delegated_lookup", skip(self, name))] pub async fn delegated_lookup( &mut self, parent: Inode, @@ -249,6 +257,7 @@ where } /// Delegation branch for readdir when the inode is owned by a child slot. 
+ #[instrument(name = "CompositeFs::delegated_readdir", skip(self))] pub async fn delegated_readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { let idx = self .slot_for_inode(ino) diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index cf887e2..2af3505 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -6,7 +6,7 @@ use std::time::SystemTime; use bytes::Bytes; use mesa_dev::MesaClient; use secrecy::ExposeSecret as _; -use tracing::{instrument, trace, warn}; +use tracing::{Instrument as _, instrument, trace, warn}; use crate::fs::icache::bridge::HashMapBridge; use crate::fs::icache::{AsyncICache, FileTable, IcbResolver}; @@ -77,6 +77,7 @@ impl IcbResolver for MesaResolver { ..stub }) } + .instrument(tracing::info_span!("MesaResolver::resolve", ino)) } } diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 350fdc8..ad76546 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -7,7 +7,7 @@ use bytes::Bytes; use futures::TryStreamExt as _; use mesa_dev::MesaClient; use secrecy::SecretString; -use tracing::{instrument, trace, warn}; +use tracing::{Instrument as _, instrument, trace, warn}; pub use super::common::{ GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError, @@ -64,6 +64,7 @@ impl IcbResolver for OrgResolver { ..stub }) } + .instrument(tracing::info_span!("OrgResolver::resolve", ino)) } } diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 0d5673f..4b65f77 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -10,7 +10,7 @@ use bytes::Bytes; use mesa_dev::MesaClient; use mesa_dev::low_level::content::{Content, DirEntry as MesaDirEntry}; use num_traits::cast::ToPrimitive as _; -use tracing::{instrument, trace, warn}; +use tracing::{Instrument as _, instrument, trace, warn}; use crate::fs::icache::{AsyncICache, FileTable, IcbResolver}; use crate::fs::r#trait::{ @@ -63,7 +63,7 @@ impl IcbResolver for RepoResolver { .repos() 
.at(&repo_name) .content() - .get(Some(ref_.as_str()), file_path.as_deref(), None) + .get(Some(ref_.as_str()), file_path.as_deref(), Some(1u64)) .await .map_err(MesaApiError::from)?; @@ -122,6 +122,7 @@ impl IcbResolver for RepoResolver { children, }) } + .instrument(tracing::info_span!("RepoResolver::resolve", ino)) } } diff --git a/src/trc.rs b/src/trc.rs index 2c1b899..40297d7 100644 --- a/src/trc.rs +++ b/src/trc.rs @@ -3,6 +3,8 @@ //! The tracing subscriber is built with a [`reload::Layer`] wrapping the fmt layer so that the //! output format can be switched at runtime (e.g. from pretty mode to ugly mode when daemonizing). +use opentelemetry::trace::TracerProvider as _; +use opentelemetry_sdk::Resource; use tracing_indicatif::IndicatifLayer; use tracing_subscriber::{ EnvFilter, Registry, @@ -39,6 +41,17 @@ impl TrcMode { /// A handle that allows reconfiguring the tracing subscriber at runtime. pub struct TrcHandle { fmt_handle: FmtReloadHandle, + tracer_provider: Option, +} + +impl Drop for TrcHandle { + fn drop(&mut self) { + if let Some(provider) = self.tracer_provider.take() + && let Err(e) = provider.shutdown() + { + eprintln!("Failed to shutdown OpenTelemetry tracer: {e}"); + } + } } impl TrcHandle { @@ -117,6 +130,7 @@ impl Trc { ); let (reload_layer, fmt_handle) = reload::Layer::new(initial_layer); + let mut tracer_provider = None; match self.mode { TrcMode::丑 { .. } => { @@ -142,14 +156,42 @@ impl Trc { .try_init()?; } TrcMode::Ugly { .. } => { - // The initial layer is already configured for ugly mode, so just init directly. 
- tracing_subscriber::registry() - .with(reload_layer) - .with(self.env_filter) - .try_init()?; + let exporter = opentelemetry_otlp::SpanExporter::builder() + .with_http() + .build() + .ok(); + + if let Some(exporter) = exporter { + let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder() + .with_batch_exporter(exporter) + .with_resource( + Resource::builder_empty() + .with_service_name("git-fs") + .build(), + ) + .build(); + let tracer = provider.tracer("git-fs"); + let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer); + + tracing_subscriber::registry() + .with(reload_layer) + .with(otel_layer) + .with(self.env_filter) + .try_init()?; + + tracer_provider = Some(provider); + } else { + tracing_subscriber::registry() + .with(reload_layer) + .with(self.env_filter) + .try_init()?; + } } } - Ok(TrcHandle { fmt_handle }) + Ok(TrcHandle { + fmt_handle, + tracer_provider, + }) } } From 76265aa54e014c0e34bb46262145af7744e8feb9 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:02:33 -0800 Subject: [PATCH 32/57] revert bad deletion --- Cargo.lock | 5 +- src/fs/local.rs | 448 ++++++++++++++++++++++++++++++++++++++++++++++++ src/fs/mod.rs | 1 + 3 files changed, 452 insertions(+), 2 deletions(-) create mode 100644 src/fs/local.rs diff --git a/Cargo.lock b/Cargo.lock index da700ea..2aeba04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1307,9 +1307,9 @@ checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "mesa-dev" -version = "1.8.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5d4651070e6257276f86eb0737bfe37bd6a6a73f7de827fca4efaef55da091" +checksum = "0685415ca22ab4f72a6a3f0f720fcca0c69d295d2a95a40cd3ce92555103d3a1" dependencies = [ "async-stream", "futures-core", @@ -1319,6 +1319,7 @@ dependencies = [ "serde", "serde_json", "serde_path_to_error", + "tracing", ] [[package]] diff --git a/src/fs/local.rs b/src/fs/local.rs new file 
mode 100644 index 0000000..73e41de --- /dev/null +++ b/src/fs/local.rs @@ -0,0 +1,448 @@ +//! An implementation of a filesystem that directly overlays the host filesystem. +use bytes::Bytes; +use nix::sys::statvfs::statvfs; +use std::{collections::HashMap, path::PathBuf}; +use thiserror::Error; +use tokio::io::{AsyncReadExt as _, AsyncSeekExt as _}; + +use std::ffi::OsStr; +use tracing::warn; + +use crate::fs::icache::{ICache, IcbLike}; +use crate::fs::r#trait::{ + DirEntry, FileAttr, FileHandle, FileOpenOptions, FilesystemStats, Fs, Inode, LockOwner, + OpenFile, OpenFlags, +}; + +#[derive(Debug, Error)] +pub enum LookupError { + #[error("inode not found")] + InodeNotFound, + #[error("io error: {0}")] + Io(#[from] std::io::Error), + + #[error("invalid file type")] + InvalidFileType, +} + +impl From for i32 { + fn from(e: LookupError) -> Self { + match e { + LookupError::InodeNotFound => libc::ENOENT, + LookupError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), + LookupError::InvalidFileType => libc::EINVAL, + } + } +} + +#[derive(Debug, Error)] +pub enum GetAttrError { + #[error("inode not found")] + InodeNotFound, + + #[error("io error: {0}")] + Io(#[from] std::io::Error), + + #[error("invalid file type")] + InvalidFileType, +} + +impl From for i32 { + fn from(e: GetAttrError) -> Self { + match e { + GetAttrError::InodeNotFound => libc::ENOENT, + GetAttrError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), + GetAttrError::InvalidFileType => libc::EINVAL, + } + } +} + +#[derive(Debug, Error)] +pub enum OpenError { + #[error("inode not found")] + InodeNotFound, + + #[error("io error: {0}")] + Io(#[from] std::io::Error), +} + +impl From for i32 { + fn from(e: OpenError) -> Self { + match e { + OpenError::InodeNotFound => libc::ENOENT, + OpenError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), + } + } +} + +#[derive(Debug, Error)] +pub enum ReadError { + #[error("inode not found")] + InodeNotFound, + + #[error("file not 
open")] + FileNotOpen, + + #[error("io error: {0}")] + Io(#[from] std::io::Error), +} + +impl From for i32 { + fn from(e: ReadError) -> Self { + match e { + ReadError::InodeNotFound => libc::ENOENT, + ReadError::FileNotOpen => libc::EBADF, + ReadError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), + } + } +} + +#[derive(Debug, Error)] +pub enum ReleaseError { + #[error("file not open")] + FileNotOpen, +} + +impl From for i32 { + fn from(e: ReleaseError) -> Self { + match e { + ReleaseError::FileNotOpen => libc::EBADF, + } + } +} + +#[derive(Debug, Error)] +pub enum ReadDirError { + #[error("inode not found")] + InodeNotFound, + + #[error("io error: {0}")] + Io(#[from] std::io::Error), + + #[error("invalid file type")] + InvalidFileType, +} + +impl From for i32 { + fn from(e: ReadDirError) -> Self { + match e { + ReadDirError::InodeNotFound => libc::ENOENT, + ReadDirError::Io(ref io_err) => io_err.raw_os_error().unwrap_or(libc::EIO), + ReadDirError::InvalidFileType => libc::EINVAL, + } + } +} + +struct InodeControlBlock { + pub rc: u64, + pub path: PathBuf, + pub children: Option>, +} + +impl IcbLike for InodeControlBlock { + fn new_root(path: PathBuf) -> Self { + Self { + rc: 1, + path, + children: None, + } + } + + fn rc(&self) -> u64 { + self.rc + } + + fn rc_mut(&mut self) -> &mut u64 { + &mut self.rc + } +} + +pub struct LocalFs { + icache: ICache, + open_files: HashMap, +} + +impl LocalFs { + #[expect(dead_code, reason = "alternative filesystem implementation")] + pub fn new(abs_path: impl Into) -> Self { + Self { + icache: ICache::new(1, abs_path), + open_files: HashMap::new(), + } + } + + fn abspath(&self) -> &PathBuf { + &self + .icache + .get_icb(1) + .unwrap_or_else(|| unreachable!("root inode 1 must always exist in inode_table")) + .path + } + + async fn parse_tokio_dirent( + dir_entry: &tokio::fs::DirEntry, + ) -> Result { + return Ok(DirEntry { + ino: dir_entry.ino(), + name: dir_entry.file_name(), + kind: 
dir_entry.file_type().await?.try_into().map_err(|()| { + tokio::io::Error::new( + tokio::io::ErrorKind::InvalidData, + "invalid file type in directory entry", + ) + })?, + }); + } +} + +#[async_trait::async_trait] +impl Fs for LocalFs { + type LookupError = LookupError; + type GetAttrError = GetAttrError; + type OpenError = OpenError; + type ReadError = ReadError; + type ReaddirError = ReadDirError; + type ReleaseError = ReleaseError; + + async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { + debug_assert!( + self.icache.contains(parent), + "parent inode {parent} not in inode_table" + ); + let parent_icb = self.icache.get_icb(parent).ok_or_else(|| { + warn!( + "Lookup called on unknown parent inode {}. This is a programming bug", + parent + ); + LookupError::InodeNotFound + })?; + + let child_path = parent_icb.path.join(name); + let meta = tokio::fs::metadata(&child_path) + .await + .map_err(LookupError::Io)?; + + let file_attr = FileAttr::try_from(meta).map_err(|()| LookupError::InvalidFileType); + debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); + let file_attr = file_attr?; + + let icb = self + .icache + .entry_or_insert_icb(file_attr.common().ino, || InodeControlBlock { + rc: 0, + path: child_path, + children: None, + }); + *icb.rc_mut() += 1; + + Ok(file_attr) + } + + async fn getattr( + &mut self, + ino: Inode, + fh: Option, + ) -> Result { + if let Some(fh) = fh { + // The file was already opened, we can just call fstat. + debug_assert!( + self.open_files.contains_key(&fh), + "file handle {fh} not in open_files" + ); + let file = self.open_files.get(&fh).ok_or_else(|| { + warn!( + "GetAttr called on unknown file handle {}. 
This is a programming bug", + fh + ); + GetAttrError::InodeNotFound + })?; + + let meta = file.metadata().await.map_err(GetAttrError::Io)?; + let file_attr = FileAttr::try_from(meta).map_err(|()| GetAttrError::InvalidFileType); + debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); + + Ok(file_attr?) + } else { + // No open path, so we have to do a painful stat on the path. + debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); + let icb = self.icache.get_icb(ino).ok_or_else(|| { + warn!( + "GetAttr called on unknown inode {}. This is a programming bug", + ino + ); + GetAttrError::InodeNotFound + })?; + + let meta = tokio::fs::metadata(&icb.path) + .await + .map_err(GetAttrError::Io)?; + let file_attr = FileAttr::try_from(meta).map_err(|()| GetAttrError::InvalidFileType); + debug_assert!(file_attr.is_ok(), "FileAttr conversion failed unexpectedly"); + + Ok(file_attr?) + } + } + + async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { + debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); + + let inode_cb = self.icache.get_icb(ino).ok_or_else(|| { + warn!( + parent = ino, + "Readdir of unknown parent inode. Programming bug" + ); + ReadDirError::InodeNotFound + })?; + + let mut read_dir = tokio::fs::read_dir(&inode_cb.path) + .await + .map_err(ReadDirError::Io)?; + + // Note that we HAVE to re-read all entries here, since there's really no way for us to + // know whether another process has modified the underlying directory, without our consent. + // + // TODO(markovejnovic): If we can guarantee that only our process has access to the + // underlying directory, we can avoid re-loading the entries every time. + // + // Two mechanisms appear to exist: namespace mount and/or file permissions. + // + // However, both of these mechanisms take time to develop and we don't have time. 
+ let mut entries: Vec = Vec::new(); + while let Some(dir_entry) = read_dir.next_entry().await.map_err(ReadDirError::Io)? { + entries.push(Self::parse_tokio_dirent(&dir_entry).await?); + } + + let inode_cb = self.icache.get_icb_mut(ino).ok_or_else(|| { + warn!(parent = ino, "inode disappeared. TOCTOU programming bug"); + ReadDirError::InodeNotFound + })?; + + Ok(inode_cb.children.insert(entries)) + } + + async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { + debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); + let icb = self.icache.get_icb(ino).ok_or_else(|| { + warn!( + "Open called on unknown inode {}. This is a programming bug", + ino + ); + OpenError::InodeNotFound + })?; + + // TODO(markovejnovic): Not all flags are supported here. We could do better. + let file = tokio::fs::OpenOptions::new() + .read(true) + .write(flags.contains(OpenFlags::RDWR) || flags.contains(OpenFlags::WRONLY)) + .append(flags.contains(OpenFlags::APPEND)) + .truncate(flags.contains(OpenFlags::TRUNC)) + .create(flags.contains(OpenFlags::CREAT)) + .open(&icb.path) + .await + .map_err(OpenError::Io)?; + + // Generate a new file handle. + let fh = self.icache.allocate_fh(); + self.open_files.insert(fh, file); + + Ok(OpenFile { + handle: fh, + // TODO(markovejnovic): Might be interesting to set some of these options. + options: FileOpenOptions::empty(), + }) + } + + async fn read( + &mut self, + ino: Inode, + fh: FileHandle, + offset: u64, + size: u32, + _flags: OpenFlags, + _lock_owner: Option, + ) -> Result { + // TODO(markovejnovic): Respect flags and lock_owner. + debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); + debug_assert!( + self.open_files.contains_key(&fh), + "file handle {fh} not in open_files" + ); + + let file: &mut tokio::fs::File = self.open_files.get_mut(&fh).ok_or_else(|| { + warn!( + "Read called on unknown file handle {}. 
This is a programming bug", + fh + ); + ReadError::FileNotOpen + })?; + + let mut buffer = vec![0u8; size as usize]; + file.seek(std::io::SeekFrom::Start(offset)) + .await + .map_err(ReadError::Io)?; + let nbytes = file.read(&mut buffer).await.map_err(ReadError::Io)?; + + buffer.truncate(nbytes); + Ok(Bytes::from(buffer)) + } + + async fn release( + &mut self, + _ino: Inode, + fh: FileHandle, + _flags: OpenFlags, + _flush: bool, + ) -> Result<(), ReleaseError> { + self.open_files.remove(&fh).ok_or_else(|| { + warn!( + "Release called on unknown file handle {}. Programming bug", + fh + ); + ReleaseError::FileNotOpen + })?; + Ok(()) + } + + async fn forget(&mut self, ino: Inode, nlookups: u64) { + debug_assert!(self.icache.contains(ino), "inode {ino} not in inode_table"); + + self.icache.forget(ino, nlookups); + } + + async fn statfs(&mut self) -> Result { + let stat = statvfs(self.abspath().as_path())?; + + Ok(FilesystemStats { + block_size: stat.block_size().try_into().map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::InvalidData, + "block size too large to fit into u32", + ) + })?, + fragment_size: stat.fragment_size(), + #[allow(clippy::allow_attributes)] + #[allow(clippy::useless_conversion)] + total_blocks: u64::from(stat.blocks()), + #[allow(clippy::allow_attributes)] + #[allow(clippy::useless_conversion)] + free_blocks: u64::from(stat.blocks_free()), + #[allow(clippy::allow_attributes)] + #[allow(clippy::useless_conversion)] + available_blocks: u64::from(stat.blocks_available()), + total_inodes: self.icache.inode_count() as u64, + #[allow(clippy::allow_attributes)] + #[allow(clippy::useless_conversion)] + free_inodes: u64::from(stat.files_free()), + #[allow(clippy::allow_attributes)] + #[allow(clippy::useless_conversion)] + available_inodes: u64::from(stat.files_available()), + filesystem_id: 0, + mount_flags: 0, + #[expect( + clippy::cast_possible_truncation, + reason = "max filename length always fits in u32" + )] + max_filename_length: 
stat.name_max() as u32, + }) + } +} diff --git a/src/fs/mod.rs b/src/fs/mod.rs index 003e1b0..ef40322 100644 --- a/src/fs/mod.rs +++ b/src/fs/mod.rs @@ -1,4 +1,5 @@ pub mod fuser; pub mod icache; +pub mod local; pub mod mescloud; pub mod r#trait; From df6fbcc808b1e27573becb529573b312a59f49f3 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:09:50 -0800 Subject: [PATCH 33/57] refactor(icache): add IcbState::into_available() helper --- src/fs/icache/async_cache.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 889e5a1..d011645 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -24,6 +24,16 @@ pub enum IcbState { Available(I), } +impl IcbState { + /// Consume `self`, returning the inner value if `Available`, or `None` if `InFlight`. + fn into_available(self) -> Option { + match self { + Self::Available(inner) => Some(inner), + Self::InFlight(_) => None, + } + } +} + /// Trait for resolving an inode to its control block. /// /// Implementations act as a "promise" that an ICB will eventually be produced @@ -905,6 +915,23 @@ mod tests { ); } + #[test] + fn icb_state_into_available_returns_inner() { + let state = IcbState::Available(TestIcb { + rc: 1, + path: "/test".into(), + resolved: true, + }); + assert!(state.into_available().is_some()); + } + + #[test] + fn icb_state_into_available_returns_none_for_inflight() { + let (_tx, rx) = watch::channel(()); + let state: IcbState = IcbState::InFlight(rx); + assert!(state.into_available().is_none()); + } + #[tokio::test] async fn get_or_resolve_resolves_stub_entry() { let resolver = TestResolver::new(); From 26b272122180adebc07ca2407aeeecdfb17dedad Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:10:25 -0800 Subject: [PATCH 34/57] refactor(icache): replace unreachable! 
with descriptive messages in entry_or_insert_icb --- src/fs/icache/async_cache.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index d011645..db9095b 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -187,7 +187,9 @@ impl AsyncICache { match self.inode_table.entry_async(ino).await { Entry::Occupied(mut occ) => match occ.get_mut() { IcbState::Available(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let t = then_fn + .take() + .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); return t(icb); } IcbState::InFlight(rx) => { @@ -197,13 +199,16 @@ impl AsyncICache { } }, Entry::Vacant(vac) => { - let f = factory.take().unwrap_or_else(|| unreachable!()); - let mut occ = vac.insert_entry(IcbState::Available(f())); - if let IcbState::Available(icb) = occ.get_mut() { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - return t(icb); - } - unreachable!("just inserted Available"); + let f = factory + .take() + .unwrap_or_else(|| unreachable!("factory consumed more than once")); + let t = then_fn + .take() + .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); + let mut icb = f(); + let result = t(&mut icb); + vac.insert_entry(IcbState::Available(icb)); + return result; } } } From 19d84f20832e1a1c188a1d993b6bb04cc9366924 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:11:09 -0800 Subject: [PATCH 35/57] feat: make opentelemetry deps optional behind __otlp_export feature --- Cargo.toml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7798fd7..eab0bc2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,14 +42,15 @@ semver = "1.0" shellexpand = "3.1" inquire = "0.9.2" tracing-indicatif = "0.3.14" -opentelemetry = "0.29" -opentelemetry_sdk = { version = "0.29", features = ["rt-tokio"] } -opentelemetry-otlp = { 
version = "0.29", features = ["http-proto", "trace", "reqwest-blocking-client"] } -tracing-opentelemetry = "0.30" +opentelemetry = { version = "0.29", optional = true } +opentelemetry_sdk = { version = "0.29", features = ["rt-tokio"], optional = true } +opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-blocking-client"], optional = true } +tracing-opentelemetry = { version = "0.30", optional = true } [features] default = [] staging = [] +__otlp_export = ["opentelemetry", "opentelemetry_sdk", "opentelemetry-otlp", "tracing-opentelemetry"] [build-dependencies] vergen-gitcl = { version = "1", features = [] } From e6e8bad663e576b0861c559a00350171051acffc Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:13:14 -0800 Subject: [PATCH 36/57] feat: gate OTLP tracing code behind __otlp_export feature flag --- src/trc.rs | 65 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/src/trc.rs b/src/trc.rs index 40297d7..a504362 100644 --- a/src/trc.rs +++ b/src/trc.rs @@ -3,7 +3,9 @@ //! The tracing subscriber is built with a [`reload::Layer`] wrapping the fmt layer so that the //! output format can be switched at runtime (e.g. from pretty mode to ugly mode when daemonizing). +#[cfg(feature = "__otlp_export")] use opentelemetry::trace::TracerProvider as _; +#[cfg(feature = "__otlp_export")] use opentelemetry_sdk::Resource; use tracing_indicatif::IndicatifLayer; use tracing_subscriber::{ @@ -41,9 +43,11 @@ impl TrcMode { /// A handle that allows reconfiguring the tracing subscriber at runtime. 
pub struct TrcHandle { fmt_handle: FmtReloadHandle, + #[cfg(feature = "__otlp_export")] tracer_provider: Option, } +#[cfg(feature = "__otlp_export")] impl Drop for TrcHandle { fn drop(&mut self) { if let Some(provider) = self.tracer_provider.take() @@ -130,6 +134,7 @@ impl Trc { ); let (reload_layer, fmt_handle) = reload::Layer::new(initial_layer); + #[cfg(feature = "__otlp_export")] let mut tracer_provider = None; match self.mode { @@ -156,31 +161,42 @@ impl Trc { .try_init()?; } TrcMode::Ugly { .. } => { - let exporter = opentelemetry_otlp::SpanExporter::builder() - .with_http() - .build() - .ok(); - - if let Some(exporter) = exporter { - let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder() - .with_batch_exporter(exporter) - .with_resource( - Resource::builder_empty() - .with_service_name("git-fs") - .build(), - ) - .build(); - let tracer = provider.tracer("git-fs"); - let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer); - - tracing_subscriber::registry() - .with(reload_layer) - .with(otel_layer) - .with(self.env_filter) - .try_init()?; + #[cfg(feature = "__otlp_export")] + { + let exporter = opentelemetry_otlp::SpanExporter::builder() + .with_http() + .build() + .ok(); + + if let Some(exporter) = exporter { + let provider = opentelemetry_sdk::trace::SdkTracerProvider::builder() + .with_batch_exporter(exporter) + .with_resource( + Resource::builder_empty() + .with_service_name("git-fs") + .build(), + ) + .build(); + let tracer = provider.tracer("git-fs"); + let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer); + + tracing_subscriber::registry() + .with(reload_layer) + .with(otel_layer) + .with(self.env_filter) + .try_init()?; + + tracer_provider = Some(provider); + } else { + tracing_subscriber::registry() + .with(reload_layer) + .with(self.env_filter) + .try_init()?; + } + } - tracer_provider = Some(provider); - } else { + #[cfg(not(feature = "__otlp_export"))] + { tracing_subscriber::registry() .with(reload_layer) 
.with(self.env_filter) @@ -191,6 +207,7 @@ impl Trc { Ok(TrcHandle { fmt_handle, + #[cfg(feature = "__otlp_export")] tracer_provider, }) } From 92391cd4d870af951682db5541b5a32251948a4e Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:55:45 -0800 Subject: [PATCH 37/57] refactor(icache): replace unreachable! with descriptive messages in get_or_resolve --- src/fs/icache/async_cache.rs | 22 +++++++++++++++------- src/fs/mescloud/composite.rs | 18 ++++++++++++++---- 2 files changed, 29 insertions(+), 11 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index db9095b..ec0752d 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -238,7 +238,9 @@ impl AsyncICache { .inode_table .read_async(&ino, |_, s| match s { IcbState::Available(icb) if !icb.needs_resolve() => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let t = then_fn + .take() + .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); Some(t(icb)) } IcbState::InFlight(_) | IcbState::Available(_) => None, @@ -254,21 +256,25 @@ impl AsyncICache { match self.inode_table.entry_async(ino).await { Entry::Occupied(mut occ) => match occ.get_mut() { IcbState::Available(icb) if !icb.needs_resolve() => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let t = then_fn + .take() + .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); return Ok(t(icb)); } IcbState::Available(_) => { // Stub needing resolution — extract stub, replace with InFlight let (tx, rx) = watch::channel(()); let old = std::mem::replace(occ.get_mut(), IcbState::InFlight(rx)); - let IcbState::Available(stub) = old else { - unreachable!() - }; + let stub = old.into_available().unwrap_or_else(|| { + unreachable!("matched Available arm, replaced value must be Available") + }); drop(occ); // release shard lock before awaiting match self.resolver.resolve(ino, Some(stub), self).await { Ok(icb) => { - let t = 
then_fn.take().unwrap_or_else(|| unreachable!()); + let t = then_fn.take().unwrap_or_else(|| { + unreachable!("then_fn consumed more than once") + }); let result = t(&icb); self.inode_table .upsert_async(ino, IcbState::Available(icb)) @@ -295,7 +301,9 @@ impl AsyncICache { match self.resolver.resolve(ino, None, self).await { Ok(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let t = then_fn + .take() + .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); let result = t(&icb); self.inode_table .upsert_async(ino, IcbState::Available(icb)) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 69d5f32..ca3c0ea 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -21,11 +21,21 @@ pub(super) struct ChildSlot { pub bridge: HashMapBridge, } -/// Generic compositing filesystem that delegates to child `Inner` filesystems. +/// Layered filesystem that presents multiple child filesystems under a single +/// inode namespace. /// -/// Holds the shared infrastructure (icache, file table, readdir buffer, child -/// slots) and implements all the delegation methods that `MesaFS` and `OrgFs` -/// previously duplicated. +/// MesaCloud's filesystem is a hierarchy of compositions: +/// +/// ```text +/// MesaFS (CompositeFs<_, OrgFs>) +/// └─ OrgFs (CompositeFs<_, RepoFs>) +/// └─ RepoFs (leaf — backed by git) +/// ``` +/// +/// Each child filesystem numbers its inodes starting from 1, so the composite +/// maintains a bidirectional inode/file-handle bridge per child (see +/// [`ChildSlot`]) to translate between the outer namespace visible to FUSE and +/// each child's internal namespace. 
pub(super) struct CompositeFs where R: IcbResolver, From 02052fc4a09f370c1c1954345ce40ac56cf342e5 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 18:57:50 -0800 Subject: [PATCH 38/57] deslop --- src/fs/icache/file_table.rs | 13 ------------- src/fs/mod.rs | 3 ++- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/src/fs/icache/file_table.rs b/src/fs/icache/file_table.rs index 4596f93..332a6ff 100644 --- a/src/fs/icache/file_table.rs +++ b/src/fs/icache/file_table.rs @@ -20,16 +20,3 @@ impl FileTable { self.next_fh.fetch_add(1, Ordering::Relaxed) } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn allocate_returns_monotonic_handles() { - let ft = FileTable::new(); - assert_eq!(ft.allocate(), 1); - assert_eq!(ft.allocate(), 2); - assert_eq!(ft.allocate(), 3); - } -} diff --git a/src/fs/mod.rs b/src/fs/mod.rs index ef40322..c68cdee 100644 --- a/src/fs/mod.rs +++ b/src/fs/mod.rs @@ -1,5 +1,6 @@ pub mod fuser; pub mod icache; -pub mod local; +// TODO: re-enable after icache refactoring is complete +// pub mod local; pub mod mescloud; pub mod r#trait; From ef0eaaa61068f6b2b71623c749b0b63ad7bf8197 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 19:02:41 -0800 Subject: [PATCH 39/57] cargo fmt --- src/fs/icache/async_cache.rs | 107 ++++++++++++++++++++++++++++++----- src/fs/mescloud/composite.rs | 2 +- src/fs/mescloud/icache.rs | 2 +- src/fs/mescloud/mod.rs | 2 +- src/fs/mescloud/org.rs | 2 +- src/fs/mescloud/repo.rs | 2 +- 6 files changed, 99 insertions(+), 18 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index ec0752d..2c1bded 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -322,20 +322,39 @@ impl AsyncICache { } } - /// Increment rc. **Awaits** `InFlight`. Panics if inode is missing. + /// Increment rc. **Awaits** `InFlight`. + /// Returns `None` if the inode does not exist or was evicted concurrently. 
#[instrument(name = "AsyncICache::inc_rc", skip(self))] - pub async fn inc_rc(&self, ino: Inode) -> u64 { - self.wait_for_available(ino).await; - self.inode_table - .update_async(&ino, |_, state| match state { - IcbState::Available(icb) => { - *icb.rc_mut() += 1; - icb.rc() + pub async fn inc_rc(&self, ino: Inode) -> Option { + loop { + if !self.wait_for_available(ino).await { + warn!(ino, "inc_rc: inode not in table"); + return None; + } + let result = self + .inode_table + .update_async(&ino, |_, state| match state { + IcbState::Available(icb) => { + *icb.rc_mut() += 1; + Some(icb.rc()) + } + IcbState::InFlight(_) => None, + }) + .await + .flatten(); + + match result { + Some(rc) => return Some(rc), + None => { + // Entry was concurrently replaced with InFlight or evicted. + if !self.contains(ino) { + warn!(ino, "inc_rc: inode evicted concurrently"); + return None; + } + // Entry exists but became InFlight — retry. } - IcbState::InFlight(_) => unreachable!("inc_rc after wait_for_available"), - }) - .await - .unwrap_or_else(|| unreachable!("inc_rc: inode {ino} not in table")) + } + } } /// Decrement rc by `nlookups`. 
If rc drops to zero, evicts and returns @@ -684,7 +703,7 @@ mod tests { ) .await; let new_rc = cache.inc_rc(42).await; - assert_eq!(new_rc, 2, "rc 1 + 1 = 2"); + assert_eq!(new_rc, Some(2), "rc 1 + 1 = 2"); } #[tokio::test] @@ -974,4 +993,66 @@ mod tests { let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; assert_eq!(path, Ok(PathBuf::from("/resolved"))); } + + #[tokio::test] + async fn inc_rc_missing_inode_returns_none() { + let cache = test_cache(); + assert_eq!(cache.inc_rc(999).await, None); + } + + #[tokio::test] + async fn inc_rc_waits_for_inflight() { + let cache = Arc::new(test_cache()); + let (tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { cache2.inc_rc(42).await }); + + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/a".into(), + resolved: true, + }, + ) + .await; + drop(tx); + + let result = handle + .await + .unwrap_or_else(|e| panic!("task panicked: {e}")); + assert_eq!( + result, + Some(2), + "waited for Available, then incremented 1 -> 2" + ); + } + + #[tokio::test] + async fn inc_rc_returns_none_after_concurrent_eviction() { + let cache = Arc::new(test_cache()); + let (tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { cache2.inc_rc(42).await }); + + // Evict instead of completing + cache.inode_table.remove_async(&42).await; + drop(tx); + + let result = handle + .await + .unwrap_or_else(|e| panic!("task panicked: {e}")); + assert_eq!(result, None, "evicted entry should return None"); + } } diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index ca3c0ea..a974f6e 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -261,7 +261,7 @@ where let outer_ino = self.translate_inner_ino(idx, inner_ino, parent, 
name).await; let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); self.icache.cache_attr(outer_ino, outer_attr).await; - let rc = self.icache.inc_rc(outer_ino).await; + let rc = self.icache.inc_rc(outer_ino).await.unwrap_or(0); trace!(outer_ino, inner_ino, rc, "lookup: resolved via delegation"); Ok(outer_attr) } diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 9751dec..8700874 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -132,7 +132,7 @@ impl> MescloudICache { self.inner.entry_or_insert_icb(ino, factory, then).await } - pub async fn inc_rc(&self, ino: Inode) -> u64 { + pub async fn inc_rc(&self, ino: Inode) -> Option { self.inner.inc_rc(ino).await } diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 2af3505..f65f4b1 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -260,7 +260,7 @@ impl Fs for MesaFS { trace!(org = org_name, "lookup: matched org"); let (ino, attr) = self.ensure_org_inode(org_idx).await; - let rc = self.composite.icache.inc_rc(ino).await; + let rc = self.composite.icache.inc_rc(ino).await.unwrap_or(0); trace!(ino, org = org_name, rc, "lookup: resolved org inode"); Ok(attr) } diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index ad76546..ab5fff7 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -370,7 +370,7 @@ impl Fs for OrgFs { let (ino, attr) = self .ensure_repo_inode(name_str, name_str, &repo.default_branch, Self::ROOT_INO) .await; - let rc = self.composite.icache.inc_rc(ino).await; + let rc = self.composite.icache.inc_rc(ino).await.unwrap_or(0); trace!(ino, repo = name_str, rc, "lookup: resolved repo inode"); Ok(attr) } diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 4b65f77..138f587 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -262,7 +262,7 @@ impl Fs for RepoFs { .ok_or(LookupError::InodeNotFound)?; self.icache.cache_attr(ino, attr).await; - let rc = 
self.icache.inc_rc(ino).await; + let rc = self.icache.inc_rc(ino).await.unwrap_or(0); trace!(ino, ?name, rc, "resolved inode"); Ok(attr) } From 3211ca51271b75364c9a2bd98977dfb5cc6d90d8 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 19:05:12 -0800 Subject: [PATCH 40/57] clippy --- src/fs/mescloud/composite.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index a974f6e..24ee4b3 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -24,7 +24,7 @@ pub(super) struct ChildSlot { /// Layered filesystem that presents multiple child filesystems under a single /// inode namespace. /// -/// MesaCloud's filesystem is a hierarchy of compositions: +/// `MesaCloud`'s filesystem is a hierarchy of compositions: /// /// ```text /// MesaFS (CompositeFs<_, OrgFs>) From 1ed29f84a931165b7b2be24b058fb5239f6dcf25 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 19:07:26 -0800 Subject: [PATCH 41/57] fix(icache): handle inc_rc race conditions gracefully instead of panicking inc_rc now returns Option and uses a retry loop to handle concurrent InFlight transitions. Callers propagate None as LookupError::InodeNotFound to maintain correct FUSE ref-count semantics. --- src/fs/icache/async_cache.rs | 7 +++++++ src/fs/mescloud/composite.rs | 8 +++++++- src/fs/mescloud/mod.rs | 7 ++++++- src/fs/mescloud/org.rs | 19 ++++++++++++++++--- src/fs/mescloud/repo.rs | 6 +++++- 5 files changed, 41 insertions(+), 6 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 2c1bded..7d90e8a 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -323,7 +323,14 @@ impl AsyncICache { } /// Increment rc. **Awaits** `InFlight`. + /// /// Returns `None` if the inode does not exist or was evicted concurrently. 
+ /// This can happen when a concurrent `forget` removes the entry between the + /// caller's insert/cache and this `inc_rc` call, or when a concurrent + /// `get_or_resolve` swaps the entry to `InFlight` and the entry is then + /// evicted on resolution failure. Callers in FUSE `lookup` paths should + /// treat `None` as a lookup failure to avoid ref-count leaks (the kernel + /// would hold a reference the cache no longer tracks). #[instrument(name = "AsyncICache::inc_rc", skip(self))] pub async fn inc_rc(&self, ino: Inode) -> Option { loop { diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 24ee4b3..83ba3a2 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -261,7 +261,13 @@ where let outer_ino = self.translate_inner_ino(idx, inner_ino, parent, name).await; let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); self.icache.cache_attr(outer_ino, outer_attr).await; - let rc = self.icache.inc_rc(outer_ino).await.unwrap_or(0); + // None means the entry was concurrently evicted; fail the lookup so + // the kernel doesn't hold a ref the cache no longer tracks. 
+ let rc = self + .icache + .inc_rc(outer_ino) + .await + .ok_or(LookupError::InodeNotFound)?; trace!(outer_ino, inner_ino, rc, "lookup: resolved via delegation"); Ok(outer_attr) } diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index f65f4b1..59ddf8a 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -260,7 +260,12 @@ impl Fs for MesaFS { trace!(org = org_name, "lookup: matched org"); let (ino, attr) = self.ensure_org_inode(org_idx).await; - let rc = self.composite.icache.inc_rc(ino).await.unwrap_or(0); + let rc = self + .composite + .icache + .inc_rc(ino) + .await + .ok_or(LookupError::InodeNotFound)?; trace!(ino, org = org_name, rc, "lookup: resolved org inode"); Ok(attr) } diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index ab5fff7..47240d7 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -358,7 +358,11 @@ impl Fs for OrgFs { // name is an owner like "torvalds" — create lazily, no API validation. trace!(owner = name_str, "lookup: resolving github owner dir"); let (ino, attr) = self.ensure_owner_inode(name_str).await; - self.composite.icache.inc_rc(ino).await; + self.composite + .icache + .inc_rc(ino) + .await + .ok_or(LookupError::InodeNotFound)?; Ok(attr) } else { // Children of org root are repos. 
@@ -370,7 +374,12 @@ impl Fs for OrgFs { let (ino, attr) = self .ensure_repo_inode(name_str, name_str, &repo.default_branch, Self::ROOT_INO) .await; - let rc = self.composite.icache.inc_rc(ino).await.unwrap_or(0); + let rc = self + .composite + .icache + .inc_rc(ino) + .await + .ok_or(LookupError::InodeNotFound)?; trace!(ino, repo = name_str, rc, "lookup: resolved repo inode"); Ok(attr) } @@ -400,7 +409,11 @@ impl Fs for OrgFs { let (ino, attr) = self .ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent) .await; - self.composite.icache.inc_rc(ino).await; + self.composite + .icache + .inc_rc(ino) + .await + .ok_or(LookupError::InodeNotFound)?; Ok(attr) } InodeRole::RepoOwned => self.composite.delegated_lookup(parent, name).await, diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 138f587..8b8b4e9 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -262,7 +262,11 @@ impl Fs for RepoFs { .ok_or(LookupError::InodeNotFound)?; self.icache.cache_attr(ino, attr).await; - let rc = self.icache.inc_rc(ino).await.unwrap_or(0); + let rc = self + .icache + .inc_rc(ino) + .await + .ok_or(LookupError::InodeNotFound)?; trace!(ino, ?name, rc, "resolved inode"); Ok(attr) } From dd0a6fe36092a735b8de260e97c1fb74e8e4c207 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 19:54:31 -0800 Subject: [PATCH 42/57] fix spurious bugs --- src/fs/icache/async_cache.rs | 225 ++++++++++++++++++++++++++++------- src/fs/icache/mod.rs | 2 +- src/fs/mescloud/composite.rs | 35 ++---- src/fs/mescloud/icache.rs | 114 ++++++++++++++++++ src/fs/mescloud/mod.rs | 14 ++- src/fs/mescloud/org.rs | 79 ++++++++---- src/fs/mescloud/repo.rs | 13 ++ 7 files changed, 393 insertions(+), 89 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 7d90e8a..d08001f 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -159,12 +159,37 @@ impl AsyncICache { .flatten() } - /// Insert an ICB 
directly as `Available` (overwrites any existing entry). + /// Insert an ICB directly as `Available`. If the entry is currently + /// `InFlight`, waits for resolution before overwriting. #[instrument(name = "AsyncICache::insert_icb", skip(self, icb))] pub async fn insert_icb(&self, ino: Inode, icb: R::Icb) { - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; + use scc::hash_map::Entry; + let mut icb = Some(icb); + loop { + match self.inode_table.entry_async(ino).await { + Entry::Vacant(vac) => { + let val = icb + .take() + .unwrap_or_else(|| unreachable!("icb consumed more than once")); + vac.insert_entry(IcbState::Available(val)); + return; + } + Entry::Occupied(mut occ) => match occ.get_mut() { + IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); + let _ = rx.changed().await; + } + IcbState::Available(_) => { + let val = icb + .take() + .unwrap_or_else(|| unreachable!("icb consumed more than once")); + *occ.get_mut() = IcbState::Available(val); + return; + } + }, + } + } } /// Get-or-insert pattern. If `ino` exists (awaits `InFlight`), runs `then` @@ -268,6 +293,7 @@ impl AsyncICache { let stub = old.into_available().unwrap_or_else(|| { unreachable!("matched Available arm, replaced value must be Available") }); + let fallback = stub.clone(); drop(occ); // release shard lock before awaiting match self.resolver.resolve(ino, Some(stub), self).await { @@ -283,7 +309,13 @@ impl AsyncICache { return Ok(result); } Err(e) => { - self.inode_table.remove_async(&ino).await; + if fallback.rc() > 0 { + self.inode_table + .upsert_async(ino, IcbState::Available(fallback)) + .await; + } else { + self.inode_table.remove_async(&ino).await; + } drop(tx); return Err(e); } @@ -368,36 +400,33 @@ impl AsyncICache { /// the ICB. **Awaits** `InFlight` entries. 
#[instrument(name = "AsyncICache::forget", skip(self))] pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { - if !self.wait_for_available(ino).await { - warn!(ino, "forget on unknown inode"); - return None; - } - - // Atomically remove if rc <= nlookups - let removed = self - .inode_table - .remove_if_async( - &ino, - |state| matches!(state, IcbState::Available(icb) if icb.rc() <= nlookups), - ) - .await; - - if let Some((_, IcbState::Available(icb))) = removed { - trace!(ino, "evicting inode"); - return Some(icb); - } + use scc::hash_map::Entry; - // Entry survives — decrement rc - self.inode_table - .update_async(&ino, |_, state| { - if let IcbState::Available(icb) = state { - *icb.rc_mut() -= nlookups; - trace!(ino, new_rc = icb.rc(), "decremented rc"); + loop { + match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => match occ.get_mut() { + IcbState::Available(icb) => { + if icb.rc() <= nlookups { + trace!(ino, "evicting inode"); + let (_, state) = occ.remove_entry(); + return state.into_available(); + } + *icb.rc_mut() -= nlookups; + trace!(ino, new_rc = icb.rc(), "decremented rc"); + return None; + } + IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); + let _ = rx.changed().await; + } + }, + Entry::Vacant(_) => { + warn!(ino, "forget on unknown inode"); + return None; } - }) - .await; - - None + } + } } /// Synchronous mutable access to an `Available` entry. 
@@ -618,6 +647,54 @@ mod tests { assert_eq!(cache.inode_count(), 2, "root + inserted = 2"); } + #[tokio::test] + async fn insert_icb_does_not_clobber_inflight() { + let cache = Arc::new(test_cache()); + let (tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + // Spawn insert_icb in background — should wait for InFlight to resolve + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { + cache2 + .insert_icb( + 42, + TestIcb { + rc: 5, + path: "/inserted".into(), + resolved: true, + }, + ) + .await; + }); + + // Give insert_icb time to start waiting + tokio::task::yield_now().await; + + // Complete the InFlight from the resolver side (write directly) + cache + .inode_table + .upsert_async( + 42, + IcbState::Available(TestIcb { + rc: 1, + path: "/resolved".into(), + resolved: true, + }), + ) + .await; + drop(tx); // signal watchers + + handle.await.expect("task panicked"); + + // After insert_icb completes, it should have overwritten the resolved value + let path = cache.get_icb(42, |icb| icb.path.clone()).await; + assert_eq!(path, Some(PathBuf::from("/inserted"))); + } + #[tokio::test] async fn entry_or_insert_creates_new() { let cache = test_cache(); @@ -823,15 +900,16 @@ mod tests { .upsert_async(42, IcbState::InFlight(rx)) .await; - // Complete before any waiter + // Complete before any waiter (simulate resolver by writing directly) cache - .insert_icb( + .inode_table + .upsert_async( 42, - TestIcb { + IcbState::Available(TestIcb { rc: 1, path: "/fast".into(), resolved: true, - }, + }), ) .await; drop(tx); @@ -1001,6 +1079,71 @@ mod tests { assert_eq!(path, Ok(PathBuf::from("/resolved"))); } + #[tokio::test] + async fn forget_handles_inflight_entry() { + let cache = Arc::new(test_cache()); + let (tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { 
cache2.forget(42, 1).await }); + + // Give forget time to start waiting + tokio::task::yield_now().await; + + // Simulate resolver completing (write directly to inode_table) + cache + .inode_table + .upsert_async( + 42, + IcbState::Available(TestIcb { + rc: 3, + path: "/inflight".into(), + resolved: true, + }), + ) + .await; + drop(tx); + + let evicted = handle.await.expect("task panicked"); + assert!(evicted.is_none(), "rc=3 - 1 = 2, should not evict"); + + let rc = cache.get_icb(42, IcbLike::rc).await; + assert_eq!(rc, Some(2), "rc should be 2 after forget(1) on rc=3"); + } + + #[tokio::test] + async fn get_or_resolve_error_preserves_stub_with_nonzero_rc() { + let resolver = TestResolver::new(); + resolver.add_err(42, "resolve failed"); + let cache = test_cache_with(resolver); + + // Insert a stub with rc=2 (simulates a looked-up entry needing resolution) + cache + .insert_icb( + 42, + TestIcb { + rc: 2, + path: "/stub".into(), + resolved: false, + }, + ) + .await; + + // get_or_resolve should fail + let result: Result = + cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert!(result.is_err(), "should propagate resolver error"); + + // The stub should be preserved since rc > 0 + assert!(cache.contains(42), "entry with rc=2 should survive error"); + let rc = cache.get_icb(42, IcbLike::rc).await; + assert_eq!(rc, Some(2), "rc should be preserved"); + } + #[tokio::test] async fn inc_rc_missing_inode_returns_none() { let cache = test_cache(); @@ -1019,14 +1162,16 @@ mod tests { let cache2 = Arc::clone(&cache); let handle = tokio::spawn(async move { cache2.inc_rc(42).await }); + // Simulate resolver completing by writing directly to inode_table cache - .insert_icb( + .inode_table + .upsert_async( 42, - TestIcb { + IcbState::Available(TestIcb { rc: 1, path: "/a".into(), resolved: true, - }, + }), ) .await; drop(tx); diff --git a/src/fs/icache/mod.rs b/src/fs/icache/mod.rs index 20fa221..2ccd80b 100644 --- a/src/fs/icache/mod.rs +++ b/src/fs/icache/mod.rs @@ 
-11,7 +11,7 @@ pub use file_table::FileTable; pub use inode_factory::InodeFactory; /// Common interface for inode control block types usable with `ICache`. -pub trait IcbLike { +pub trait IcbLike: Clone { /// Create an ICB with rc=1, the given path, and no children. fn new_root(path: std::path::PathBuf) -> Self; fn rc(&self) -> u64; diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 83ba3a2..0b1ec7d 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -45,6 +45,8 @@ where pub readdir_buf: Vec, /// Maps outer inode to index into `slots` for child-root inodes. pub child_inodes: HashMap, + /// Maps every translated outer inode to its owning slot index. + pub inode_to_slot: HashMap, pub slots: Vec>, } @@ -62,24 +64,10 @@ where + Send + Sync, { - /// Walk the parent chain to find which child slot owns an inode. + /// Look up which child slot owns an inode via direct map. #[instrument(name = "CompositeFs::slot_for_inode", skip(self))] - pub async fn slot_for_inode(&self, ino: Inode) -> Option { - if let Some(&idx) = self.child_inodes.get(&ino) { - return Some(idx); - } - let mut current = ino; - loop { - let parent = self - .icache - .get_icb(current, |icb| icb.parent) - .await - .flatten()?; - if let Some(&idx) = self.child_inodes.get(&parent) { - return Some(idx); - } - current = parent; - } + pub fn slot_for_inode(&self, ino: Inode) -> Option { + self.inode_to_slot.get(&ino).copied() } /// Allocate an outer file handle and map it through the bridge. 
@@ -103,6 +91,7 @@ where let outer_ino = self.slots[slot_idx] .bridge .backward_or_insert_inode(inner_ino, || self.icache.allocate_inode()); + self.inode_to_slot.insert(outer_ino, slot_idx); self.icache .entry_or_insert_icb( outer_ino, @@ -135,7 +124,7 @@ where ino: Inode, flags: OpenFlags, ) -> Result { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + let idx = self.slot_for_inode(ino).ok_or_else(|| { warn!(ino, "open on inode not belonging to any child"); OpenError::InodeNotFound })?; @@ -168,7 +157,7 @@ where flags: OpenFlags, lock_owner: Option, ) -> Result { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + let idx = self.slot_for_inode(ino).ok_or_else(|| { warn!(ino, "read on inode not belonging to any child"); ReadError::InodeNotFound })?; @@ -195,7 +184,7 @@ where flags: OpenFlags, flush: bool, ) -> Result<(), ReleaseError> { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { + let idx = self.slot_for_inode(ino).ok_or_else(|| { warn!(ino, "release on inode not belonging to any child"); ReleaseError::FileNotOpen })?; @@ -220,13 +209,14 @@ where #[must_use] #[instrument(name = "CompositeFs::delegated_forget", skip(self))] pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { - if let Some(idx) = self.slot_for_inode(ino).await + if let Some(idx) = self.slot_for_inode(ino) && let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) { self.slots[idx].inner.forget(inner_ino, nlookups).await; } if self.icache.forget(ino, nlookups).await.is_some() { self.child_inodes.remove(&ino); + self.inode_to_slot.remove(&ino); for slot in &mut self.slots { slot.bridge.remove_inode_by_left(ino); } @@ -251,7 +241,6 @@ where ) -> Result { let idx = self .slot_for_inode(parent) - .await .ok_or(LookupError::InodeNotFound)?; let inner_parent = self.slots[idx] .bridge @@ -277,13 +266,13 @@ where pub async fn delegated_readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { let idx = self 
.slot_for_inode(ino) - .await .ok_or(ReadDirError::InodeNotFound)?; let inner_ino = self.slots[idx] .bridge .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); let inner_entries = self.slots[idx].inner.readdir(inner_ino).await?; let inner_entries: Vec = inner_entries.to_vec(); + self.icache.evict_zero_rc_children(ino).await; let mut outer_entries = Vec::with_capacity(inner_entries.len()); for entry in &inner_entries { let outer_child_ino = self diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 8700874..bae1b5e 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -9,6 +9,7 @@ use crate::fs::r#trait::{ }; /// Inode control block for mescloud filesystem layers. +#[derive(Clone)] pub struct InodeControlBlock { pub parent: Option, pub rc: u64, @@ -191,6 +192,22 @@ impl> MescloudICache { } } + /// Evict all `Available` children of `parent` that have `rc == 0`. + /// Returns the number of evicted entries. + pub async fn evict_zero_rc_children(&self, parent: Inode) -> usize { + let mut to_evict = Vec::new(); + self.inner.for_each(|&ino, icb| { + if icb.rc == 0 && icb.parent == Some(parent) { + to_evict.push(ino); + } + }); + let count = to_evict.len(); + for ino in to_evict { + self.inner.forget(ino, 0).await; + } + count + } + /// Find an existing child by (parent, name) or allocate a new inode. /// If new, inserts a stub ICB (parent+path set, attr=None, children=None, rc=0). /// Does NOT bump rc. Returns the inode number. 
@@ -227,7 +244,10 @@ impl> MescloudICache { #[cfg(test)] mod tests { + use std::future::Future; + use super::*; + use crate::fs::icache::async_cache::AsyncICache; use crate::fs::r#trait::DirEntryType; fn dummy_dir_attr(ino: Inode) -> FileAttr { @@ -305,4 +325,98 @@ mod tests { }; assert!(!icb.needs_resolve()); } + + struct NoOpResolver; + + impl IcbResolver for NoOpResolver { + type Icb = InodeControlBlock; + type Error = std::convert::Infallible; + + #[expect( + clippy::manual_async_fn, + reason = "must match IcbResolver trait signature" + )] + fn resolve( + &self, + _ino: Inode, + _stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + async { unreachable!("NoOpResolver should not be called") } + } + } + + fn test_mescloud_cache() -> MescloudICache { + MescloudICache::new(NoOpResolver, 1, (0, 0), 4096) + } + + #[tokio::test] + async fn evict_zero_rc_children_removes_stubs() { + let cache = test_mescloud_cache(); + + // Insert stubs as children of root (ino=1) with rc=0 + cache + .insert_icb( + 10, + InodeControlBlock { + rc: 0, + path: "child_a".into(), + parent: Some(1), + attr: None, + children: None, + }, + ) + .await; + cache + .insert_icb( + 11, + InodeControlBlock { + rc: 0, + path: "child_b".into(), + parent: Some(1), + attr: None, + children: None, + }, + ) + .await; + + // Insert a child with rc > 0 — should survive + cache + .insert_icb( + 12, + InodeControlBlock { + rc: 1, + path: "active".into(), + parent: Some(1), + attr: None, + children: None, + }, + ) + .await; + + // Insert a stub under a different parent — should survive + cache + .insert_icb( + 20, + InodeControlBlock { + rc: 0, + path: "other".into(), + parent: Some(12), + attr: None, + children: None, + }, + ) + .await; + + let evicted = cache.evict_zero_rc_children(1).await; + assert_eq!(evicted, 2, "should evict 2 zero-rc children of root"); + + assert!(!cache.contains(10), "child_a should be evicted"); + assert!(!cache.contains(11), "child_b should be evicted"); + 
assert!(cache.contains(12), "active child should survive"); + assert!( + cache.contains(20), + "child of different parent should survive" + ); + } } diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 59ddf8a..88356b5 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -119,6 +119,7 @@ impl MesaFS { file_table: FileTable::new(), readdir_buf: Vec::new(), child_inodes: HashMap::new(), + inode_to_slot: HashMap::new(), slots: orgs .map(|org_conf| { let client = MesaClient::builder() @@ -137,14 +138,14 @@ impl MesaFS { } /// Classify an inode by its role. - async fn inode_role(&self, ino: Inode) -> InodeRole { + fn inode_role(&self, ino: Inode) -> InodeRole { if ino == Self::ROOT_NODE_INO { return InodeRole::Root; } if self.composite.child_inodes.contains_key(&ino) { return InodeRole::OrgOwned; } - if self.composite.slot_for_inode(ino).await.is_some() { + if self.composite.slot_for_inode(ino).is_some() { return InodeRole::OrgOwned; } debug_assert!(false, "inode {ino} not found in any org slot"); @@ -216,8 +217,11 @@ impl MesaFS { .await; self.composite.child_inodes.insert(ino, org_idx); + self.composite.inode_to_slot.insert(ino, org_idx); - // Seed bridge: mesa org-root <-> OrgFs::ROOT_INO. + // Reset bridge (may have stale mappings from a previous eviction cycle) + // and seed: mesa org-root <-> OrgFs::ROOT_INO. 
+ self.composite.slots[org_idx].bridge = HashMapBridge::new(); self.composite.slots[org_idx] .bridge .insert_inode(ino, OrgFs::ROOT_INO); @@ -248,7 +252,7 @@ impl Fs for MesaFS { #[instrument(name = "MesaFS::lookup", skip(self))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - match self.inode_role(parent).await { + match self.inode_role(parent) { InodeRole::Root => { let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; let org_idx = self @@ -284,7 +288,7 @@ impl Fs for MesaFS { #[instrument(name = "MesaFS::readdir", skip(self))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - match self.inode_role(ino).await { + match self.inode_role(ino) { InodeRole::Root => { let org_info: Vec<(usize, String)> = self .composite diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 47240d7..bb01a2f 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -189,6 +189,7 @@ impl OrgFs { file_table: FileTable::new(), readdir_buf: Vec::new(), child_inodes: HashMap::new(), + inode_to_slot: HashMap::new(), slots: Vec::new(), }, owner_inodes: HashMap::new(), @@ -196,7 +197,7 @@ impl OrgFs { } /// Classify an inode by its role. - async fn inode_role(&self, ino: Inode) -> InodeRole { + fn inode_role(&self, ino: Inode) -> InodeRole { if ino == Self::ROOT_INO { return InodeRole::OrgRoot; } @@ -206,7 +207,7 @@ impl OrgFs { if self.composite.child_inodes.contains_key(&ino) { return InodeRole::RepoOwned; } - if self.composite.slot_for_inode(ino).await.is_some() { + if self.composite.slot_for_inode(ino).is_some() { return InodeRole::RepoOwned; } debug_assert!(false, "inode {ino} not found in any repo slot"); @@ -239,29 +240,26 @@ impl OrgFs { trace!(ino, repo = repo_name, rc, "ensure_repo_inode: reusing"); return (ino, attr); } - // Attr missing — rebuild. 
warn!( ino, repo = repo_name, "ensure_repo_inode: attr missing, rebuilding" ); - let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: mescloud_icache::make_common_file_attr( - ino, - 0o755, - now, - now, - self.composite.icache.fs_owner(), - self.composite.icache.block_size(), - ), - }; - self.composite.icache.cache_attr(ino, attr).await; - return (ino, attr); + return self.make_repo_dir_attr(ino).await; } } - // Allocate new. + // Check for orphaned slot (slot exists but not in child_inodes). + if let Some(idx) = self + .composite + .slots + .iter() + .position(|s| s.inner.repo_name() == repo_name) + { + return self.register_repo_slot(idx, display_name, parent_ino).await; + } + + // Allocate truly new slot. let ino = self.composite.icache.allocate_inode(); trace!( ino, @@ -269,7 +267,6 @@ impl OrgFs { "ensure_repo_inode: allocated new inode" ); - let now = SystemTime::now(); self.composite .icache .insert_icb( @@ -301,7 +298,49 @@ impl OrgFs { bridge, }); self.composite.child_inodes.insert(ino, idx); + self.composite.inode_to_slot.insert(ino, idx); + self.make_repo_dir_attr(ino).await + } + + /// Allocate a new inode, register it in an existing (orphaned) slot, and + /// return `(ino, attr)`. 
+ async fn register_repo_slot( + &mut self, + idx: usize, + display_name: &str, + parent_ino: Inode, + ) -> (Inode, FileAttr) { + let ino = self.composite.icache.allocate_inode(); + trace!(ino, idx, "register_repo_slot: reusing orphaned slot"); + + self.composite + .icache + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: display_name.into(), + parent: Some(parent_ino), + attr: None, + children: None, + }, + ) + .await; + + self.composite.slots[idx].bridge = HashMapBridge::new(); + self.composite.slots[idx] + .bridge + .insert_inode(ino, RepoFs::ROOT_INO); + self.composite.child_inodes.insert(ino, idx); + self.composite.inode_to_slot.insert(ino, idx); + + self.make_repo_dir_attr(ino).await + } + + /// Build and cache a directory attr for `ino`, returning `(ino, attr)`. + async fn make_repo_dir_attr(&self, ino: Inode) -> (Inode, FileAttr) { + let now = SystemTime::now(); let attr = FileAttr::Directory { common: mescloud_icache::make_common_file_attr( ino, @@ -349,7 +388,7 @@ impl Fs for OrgFs { #[instrument(name = "OrgFs::lookup", skip(self), fields(org = %self.name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - match self.inode_role(parent).await { + match self.inode_role(parent) { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. let name_str = name.to_str().ok_or(LookupError::InodeNotFound)?; @@ -431,7 +470,7 @@ impl Fs for OrgFs { #[instrument(name = "OrgFs::readdir", skip(self), fields(org = %self.name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - match self.inode_role(ino).await { + match self.inode_role(ino) { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. 
if self.is_github() { diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 8b8b4e9..b577921 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -58,6 +58,11 @@ impl IcbResolver for RepoResolver { let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; + // Non-root inodes must have a resolvable path. + if stub.parent.is_some() && file_path.is_none() { + return Err(LookupError::InodeNotFound); + } + let content = client .org(&org_name) .repos() @@ -309,6 +314,8 @@ impl Fs for RepoFs { "readdir: resolved directory listing from icache" ); + self.icache.evict_zero_rc_children(ino).await; + let mut entries = Vec::with_capacity(children.len()); for (name, kind) in &children { let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; @@ -404,6 +411,12 @@ impl Fs for RepoFs { let file_path = self.path_of_inode(ino).await; + // Non-root inodes must have a resolvable path. + if ino != Self::ROOT_INO && file_path.is_none() { + warn!(ino, "read: path_of_inode returned None for non-root inode"); + return Err(ReadError::InodeNotFound); + } + let content = self .client .org(&self.org_name) From df348b4d8268b6ad6e4293a58500b4dd1b6bd237 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 20:22:48 -0800 Subject: [PATCH 43/57] fix(icache): prevent upsert_async from resurrecting evicted entries --- src/fs/icache/async_cache.rs | 125 ++++++++++++++++++++++++++++++++--- 1 file changed, 116 insertions(+), 9 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index d08001f..be33257 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -239,6 +239,26 @@ impl AsyncICache { } } + /// Write an ICB back to the table only if the entry still exists. 
+ /// + /// If the entry was evicted (vacant) during resolution, the result is + /// silently dropped — this prevents resurrecting entries that a concurrent + /// `forget` has already removed. + async fn write_back_if_present(&self, ino: Inode, icb: R::Icb) { + use scc::hash_map::Entry; + match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => { + *occ.get_mut() = IcbState::Available(icb); + } + Entry::Vacant(_) => { + tracing::debug!( + ino, + "resolved inode was evicted during resolution, dropping result" + ); + } + } + } + /// Look up `ino`. If `Available` and fully resolved, run `then` and return /// `Ok(T)`. If `Available` but `needs_resolve()` is true (stub), extract /// the stub, resolve it, cache the result, then run `then`. If absent, call @@ -302,17 +322,13 @@ impl AsyncICache { unreachable!("then_fn consumed more than once") }); let result = t(&icb); - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; + self.write_back_if_present(ino, icb).await; drop(tx); return Ok(result); } Err(e) => { if fallback.rc() > 0 { - self.inode_table - .upsert_async(ino, IcbState::Available(fallback)) - .await; + self.write_back_if_present(ino, fallback).await; } else { self.inode_table.remove_async(&ino).await; } @@ -337,9 +353,7 @@ impl AsyncICache { .take() .unwrap_or_else(|| unreachable!("then_fn consumed more than once")); let result = t(&icb); - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; + self.write_back_if_present(ino, icb).await; drop(tx); return Ok(result); } @@ -1207,4 +1221,97 @@ mod tests { .unwrap_or_else(|e| panic!("task panicked: {e}")); assert_eq!(result, None, "evicted entry should return None"); } + + /// Resolver that pauses mid-resolution via a `Notify`, allowing the test + /// to interleave a `forget` while the resolve future is suspended. + struct SlowResolver { + /// Signalled by the resolver once it has started (so the test knows + /// resolution is in progress). 
+ started: Arc, + /// The resolver waits on this before returning (the test signals it + /// after calling `forget`). + proceed: Arc, + } + + impl IcbResolver for SlowResolver { + type Icb = TestIcb; + type Error = String; + + fn resolve( + &self, + _ino: Inode, + _stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + let started = Arc::clone(&self.started); + let proceed = Arc::clone(&self.proceed); + async move { + started.notify_one(); + proceed.notified().await; + Ok(TestIcb { + rc: 1, + path: "/slow-resolved".into(), + resolved: true, + }) + } + } + } + + /// Regression test: an entry evicted by `forget` during an in-progress + /// `get_or_resolve` must NOT be resurrected when resolution completes. + #[tokio::test] + async fn get_or_resolve_does_not_resurrect_evicted_entry() { + let started = Arc::new(tokio::sync::Notify::new()); + let proceed = Arc::new(tokio::sync::Notify::new()); + + let cache = Arc::new(AsyncICache::new( + SlowResolver { + started: Arc::clone(&started), + proceed: Arc::clone(&proceed), + }, + 1, + "/root", + )); + + let ino: Inode = 42; + + // Insert a stub with rc=1 (simulates a looked-up, unresolved entry). + cache + .insert_icb( + ino, + TestIcb { + rc: 1, + path: "/stub".into(), + resolved: false, + }, + ) + .await; + + // Spawn get_or_resolve which will trigger slow resolution. + let cache2 = Arc::clone(&cache); + let resolve_handle = + tokio::spawn(async move { cache2.get_or_resolve(ino, |icb| icb.path.clone()).await }); + + // Wait until the resolver has started (entry is now InFlight). + started.notified().await; + + // Evict the entry while resolution is in progress. + // forget waits for InFlight, so we need to complete resolution for + // forget to proceed. Instead, remove the InFlight entry directly to + // simulate a concurrent eviction (e.g., by another path that already + // removed the entry). + cache.inode_table.remove_async(&ino).await; + + // Let the resolver finish. 
+ proceed.notify_one(); + + // Wait for get_or_resolve to complete. + drop(resolve_handle.await.expect("task panicked")); + + // The entry must NOT have been resurrected by write_back_if_present. + assert!( + !cache.contains(ino), + "evicted entry must not be resurrected after resolution completes" + ); + } } From 48d1f2cfbd981ef529455fd5a523cf1486fc8769 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:05:00 -0800 Subject: [PATCH 44/57] fix(icache): loop in wait_for_available and get_icb on re-encountering InFlight --- .../plans/2025-02-10-async-mescloud-icache.md | 915 ++++++++++++++++++ docs/plans/2026-02-03-update-checker.md | 217 +++++ .../2026-02-04-mount-directory-interlock.md | 185 ++++ docs/plans/2026-02-05-homebrew-tap-formula.md | 281 ++++++ docs/plans/2026-02-06-decouple-dcache-icb.md | 306 ++++++ .../2026-02-06-default-run-subcommand.md | 70 ++ .../plans/2026-02-09-async-icache-resolver.md | 858 ++++++++++++++++ docs/plans/2026-02-09-mesa-dev-migration.md | 469 +++++++++ docs/plans/2026-02-09-shellcheck-workflow.md | 131 +++ docs/plans/2026-02-10-composite-fs-dedup.md | 670 +++++++++++++ docs/plans/2026-02-10-file-table.md | 280 ++++++ ...2026-02-10-pr31-async-icache-bug-review.md | 526 ++++++++++ docs/plans/2026-02-10-pr31-bug-review.md | 79 ++ .../2026-02-10-readdir-icache-caching.md | 572 +++++++++++ .../2026-02-10-resolver-as-data-provider.md | 334 +++++++ docs/plans/2026-02-10-split-async-contains.md | 287 ++++++ src/fs/icache/async_cache.rs | 166 +++- src/fs/mescloud/icache.rs | 3 +- 18 files changed, 6308 insertions(+), 41 deletions(-) create mode 100644 docs/plans/2025-02-10-async-mescloud-icache.md create mode 100644 docs/plans/2026-02-03-update-checker.md create mode 100644 docs/plans/2026-02-04-mount-directory-interlock.md create mode 100644 docs/plans/2026-02-05-homebrew-tap-formula.md create mode 100644 docs/plans/2026-02-06-decouple-dcache-icb.md create mode 100644 docs/plans/2026-02-06-default-run-subcommand.md 
create mode 100644 docs/plans/2026-02-09-async-icache-resolver.md create mode 100644 docs/plans/2026-02-09-mesa-dev-migration.md create mode 100644 docs/plans/2026-02-09-shellcheck-workflow.md create mode 100644 docs/plans/2026-02-10-composite-fs-dedup.md create mode 100644 docs/plans/2026-02-10-file-table.md create mode 100644 docs/plans/2026-02-10-pr31-async-icache-bug-review.md create mode 100644 docs/plans/2026-02-10-pr31-bug-review.md create mode 100644 docs/plans/2026-02-10-readdir-icache-caching.md create mode 100644 docs/plans/2026-02-10-resolver-as-data-provider.md create mode 100644 docs/plans/2026-02-10-split-async-contains.md diff --git a/docs/plans/2025-02-10-async-mescloud-icache.md b/docs/plans/2025-02-10-async-mescloud-icache.md new file mode 100644 index 0000000..559ef78 --- /dev/null +++ b/docs/plans/2025-02-10-async-mescloud-icache.md @@ -0,0 +1,915 @@ +# Async Mescloud ICache Migration + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Rewrite `src/fs/mescloud/icache.rs` to use `AsyncICache`, with resolvers that fetch from the mesa backend. + +**Architecture:** `MescloudICache` becomes generic over `R: IcbResolver`. Each filesystem layer provides its own resolver: `RepoResolver` fetches file/directory metadata from the mesa content API, `OrgResolver` validates repos via the repo API, and `MesaResolver` creates static org directory entries. The `IcbLike` trait gains a `needs_resolve()` method so the cache can distinguish "stub" entries (parent+path known, attr not yet fetched) from fully resolved entries. `get_or_resolve` handles both missing entries and stubs that need resolution. + +**Tech Stack:** Rust, `scc::HashMap` (already in Cargo.toml), `tokio::sync::watch`, `async_trait`, `mesa_dev::MesaClient` + +--- + +## Key Design Decisions + +### Stub + Resolve pattern + +When `lookup(parent, name)` is called, the flow is: +1. 
`ensure_child_ino(parent, name)` — find existing child or allocate a new inode; insert a **stub** ICB (`parent` + `path` set, `attr: None`) if the entry is new. +2. `get_or_resolve(ino, |icb| icb.attr)` — if the stub's `needs_resolve()` returns true (attr is None), the cache transitions the entry to `InFlight`, calls the resolver, and transitions to `Available` once the resolver returns a fully populated ICB with attr. +3. Concurrent callers for the same inode coalesce: they see `InFlight` and wait. + +This lets the resolver access the stub's `parent` and `path` fields to build the API path, without needing a separate context parameter. + +### Resolver receives `stub: Option` + `cache: &AsyncICache` + +The resolver signature is: +```rust +fn resolve(&self, ino: Inode, stub: Option, cache: &AsyncICache) -> Future> +``` + +- `stub`: `Some(icb)` when upgrading a stub entry, `None` when creating from scratch. +- `cache`: lets the resolver walk the parent chain to build paths (e.g., `cache.get_icb(parent, |icb| icb.path.clone())`). + +### `get_or_resolve` handles stubs + +The existing `get_or_resolve` only resolves **missing** entries. We extend it to also resolve **Available entries where `icb.needs_resolve()` is true**: + +| Current state | Action | +|---|---| +| Available + `!needs_resolve()` | Return immediately (fast path) | +| Available + `needs_resolve()` | Extract stub → InFlight → resolve → Available | +| InFlight | Wait for resolution | +| Vacant | InFlight → resolve → Available | + +### `readdir` bypasses the resolver + +`readdir` makes a single API call that returns all children. It inserts fully-populated ICBs directly via `insert_icb` (not via the resolver). This is a batch optimization — the resolver is for per-inode resolution. 
+ +### `MescloudICache` is generic + +```rust +pub struct MescloudICache> { + inner: AsyncICache, + inode_factory: InodeFactory, + fs_owner: (u32, u32), + block_size: u32, +} +``` + +Each FS layer instantiates with its own resolver: +- `RepoFs` → `MescloudICache` — resolver calls mesa content API +- `OrgFs` → `MescloudICache` — resolver validates repos + creates directory attrs +- `MesaFS` → `MescloudICache` — resolver creates static directory attrs + +### `readdir_buf` replaces `children` + +The `Fs::readdir` trait returns `&[DirEntry]` borrowed from `&mut self`. The async cache's closure-based API can't return references that outlive the closure. Each FS struct gets a `readdir_buf: Vec` field. The `children` field on `InodeControlBlock` is removed. + +### `make_common_file_attr` becomes a free function + +Currently a method on `MescloudICache` (uses `self.fs_owner`, `self.block_size`). Becomes a free function so resolvers can call it too. + +--- + +## Task 1: Extend IcbLike trait with `needs_resolve` + +**Files:** +- Modify: `src/fs/icache/mod.rs` + +**Step 1: Add `needs_resolve` to IcbLike** + +```rust +pub trait IcbLike { + fn new_root(path: std::path::PathBuf) -> Self; + fn rc(&self) -> u64; + fn rc_mut(&mut self) -> &mut u64; + /// Returns true if this entry needs resolution (e.g., attr not yet fetched). 
+ fn needs_resolve(&self) -> bool; +} +``` + +**Step 2: Update existing IcbLike implementations** + +In `src/fs/local.rs`, the `InodeControlBlock` for local FS: +```rust +fn needs_resolve(&self) -> bool { + false // local FS entries are always fully resolved +} +``` + +In `src/fs/mescloud/icache.rs`: +```rust +fn needs_resolve(&self) -> bool { + self.attr.is_none() +} +``` + +In `src/fs/icache/async_cache.rs` tests, the `TestIcb`: +```rust +fn needs_resolve(&self) -> bool { + false +} +``` + +**Step 3: Verify compilation** + +Run: `cargo check -p git-fs` + +**Step 4: Commit** + +```bash +git add src/fs/icache/mod.rs src/fs/local.rs src/fs/mescloud/icache.rs src/fs/icache/async_cache.rs +git commit -m "feat(icache): add needs_resolve to IcbLike trait" +``` + +--- + +## Task 2: Modify IcbResolver trait and AsyncICache + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` +- Modify: `src/fs/icache/mod.rs` + +**Step 1: Update IcbResolver trait** + +In `async_cache.rs`, change the resolver to receive stub data and cache reference: + +```rust +pub trait IcbResolver: Send + Sync { + type Icb: IcbLike + Send + Sync; + type Error: Send; + + /// Resolve an inode to a fully-populated control block. + /// + /// - `stub`: `Some(icb)` if upgrading an existing stub entry, `None` if creating + /// from scratch. The stub typically has `parent` and `path` set but `attr` missing. + /// - `cache`: reference to the cache, useful for walking parent chains to build paths. 
+ fn resolve( + &self, + ino: Inode, + stub: Option, + cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized; +} +``` + +**Step 2: Update `get_or_resolve` to handle stubs** + +Rewrite `get_or_resolve` in `AsyncICache`: + +```rust +pub async fn get_or_resolve( + &self, + ino: Inode, + then: impl FnOnce(&R::Icb) -> T, +) -> Result { + use scc::hash_map::Entry; + + let mut then_fn = Some(then); + + // Fast path: Available and fully resolved + { + let hit = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::Available(icb) if !icb.needs_resolve() => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + Some(t(icb)) + } + _ => None, + }) + .await; + if let Some(Some(r)) = hit { + return Ok(r); + } + } + + // Slow path: missing, InFlight, or stub needing resolution + loop { + match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => match occ.get_mut() { + IcbState::Available(icb) if !icb.needs_resolve() => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + return Ok(t(icb)); + } + IcbState::Available(_) => { + // Stub needing resolution — extract stub, replace with InFlight + let (tx, rx) = watch::channel(()); + let old = std::mem::replace(occ.get_mut(), IcbState::InFlight(rx)); + let stub = match old { + IcbState::Available(icb) => icb, + _ => unreachable!(), + }; + drop(occ); // release shard lock before awaiting + + match self.resolver.resolve(ino, Some(stub), self).await { + Ok(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let result = t(&icb); + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + drop(tx); + return Ok(result); + } + Err(e) => { + self.inode_table.remove_async(&ino).await; + drop(tx); + return Err(e); + } + } + } + IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); + let _ = rx.changed().await; + } + }, + Entry::Vacant(vac) => { + let (tx, rx) = watch::channel(()); + vac.insert_entry(IcbState::InFlight(rx)); 
+ + match self.resolver.resolve(ino, None, self).await { + Ok(icb) => { + let t = then_fn.take().unwrap_or_else(|| unreachable!()); + let result = t(&icb); + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + drop(tx); + return Ok(result); + } + Err(e) => { + self.inode_table.remove_async(&ino).await; + drop(tx); + return Err(e); + } + } + } + } + } +} +``` + +**Step 3: Add `get_icb_mut_sync` for initialization** + +Add after `for_each`: + +```rust +/// Synchronous mutable access to an `Available` entry. +/// Does **not** wait for `InFlight`. Intended for initialization. +pub fn get_icb_mut_sync(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { + self.inode_table + .update(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .flatten() +} +``` + +**Step 4: Update existing tests** + +All test resolvers need the new signature. Update `TestResolver`: + +```rust +impl IcbResolver for TestResolver { + type Icb = TestIcb; + type Error = String; + + fn resolve( + &self, + ino: Inode, + _stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + let result = self.responses.lock().expect("test mutex").remove(&ino) + .unwrap_or_else(|| Err(format!("no response for inode {ino}"))); + async move { result } + } +} +``` + +Similarly update `CountingResolver`. Also update `TestIcb` to implement `needs_resolve`: + +```rust +impl IcbLike for TestIcb { + // ... existing methods ... + fn needs_resolve(&self) -> bool { + false + } +} +``` + +**Step 5: Add test for stub resolution** + +```rust +#[tokio::test] +async fn get_or_resolve_resolves_stubs() { + let resolver = TestResolver::new(); + resolver.add(42, TestIcb { rc: 1, path: "/resolved".into() }); + let cache = test_cache_with(resolver); + + // Insert a stub that needs_resolve + // We need a TestIcb variant that returns true for needs_resolve... + // For this test, use a NeedsResolveIcb or modify TestIcb. 
+ // Simplest: make TestIcb.needs_resolve configurable. +} +``` + +Note: to properly test stub resolution, `TestIcb` needs a way to signal `needs_resolve() == true`. Add an optional field: + +```rust +#[derive(Debug, Clone, PartialEq)] +struct TestIcb { + rc: u64, + path: PathBuf, + resolved: bool, // defaults to true in existing tests +} + +impl IcbLike for TestIcb { + fn needs_resolve(&self) -> bool { + !self.resolved + } + // ... +} +``` + +Then test: +```rust +#[tokio::test] +async fn get_or_resolve_resolves_stub_entry() { + let resolver = TestResolver::new(); + resolver.add(42, TestIcb { rc: 1, path: "/resolved".into(), resolved: true }); + let cache = test_cache_with(resolver); + + // Insert unresolved stub + cache.insert_icb(42, TestIcb { rc: 0, path: "/stub".into(), resolved: false }).await; + + // get_or_resolve should trigger resolution because needs_resolve() == true + let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; + assert_eq!(path, Ok(PathBuf::from("/resolved"))); +} +``` + +**Step 6: Run tests** + +Run: `cargo test -p git-fs --lib fs::icache::async_cache::tests` +Expected: PASS + +**Step 7: Update icache/mod.rs exports** + +Remove `#[cfg_attr(not(test), expect(dead_code))]` and `#[expect(unused_imports)]` annotations. 
+ +**Step 8: Commit** + +```bash +git add src/fs/icache/async_cache.rs src/fs/icache/mod.rs +git commit -m "feat(icache): extend IcbResolver with stub+cache params, handle stubs in get_or_resolve" +``` + +--- + +## Task 3: Make InodeFactory atomic + +**Files:** +- Modify: `src/fs/icache/inode_factory.rs` + +**Step 1: Rewrite with AtomicU64** + +```rust +use std::sync::atomic::{AtomicU64, Ordering}; +use crate::fs::r#trait::Inode; + +pub struct InodeFactory { + next_inode: AtomicU64, +} + +impl InodeFactory { + pub fn new(start: Inode) -> Self { + Self { next_inode: AtomicU64::new(start) } + } + + pub fn allocate(&self) -> Inode { + self.next_inode.fetch_add(1, Ordering::Relaxed) + } +} +``` + +**Step 2: Add tests, verify, commit** + +```bash +git add src/fs/icache/inode_factory.rs +git commit -m "feat(icache): make InodeFactory atomic" +``` + +--- + +## Task 4: Rewrite MescloudICache as generic wrapper + +**Files:** +- Modify: `src/fs/mescloud/icache.rs` + +This is the core rewrite. `MescloudICache` becomes `MescloudICache` generic over the resolver. + +**Step 1: Define the new MescloudICache** + +```rust +use std::ffi::OsStr; +use std::time::SystemTime; + +use tracing::warn; + +use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver, InodeFactory}; +use crate::fs::r#trait::{ + CommonFileAttr, DirEntryType, FileAttr, FileHandle, FilesystemStats, Inode, Permissions, +}; + +pub struct InodeControlBlock { + pub parent: Option, + pub rc: u64, + pub path: std::path::PathBuf, + pub attr: Option, +} + +impl IcbLike for InodeControlBlock { + fn new_root(path: std::path::PathBuf) -> Self { + Self { rc: 1, parent: None, path, attr: None } + } + fn rc(&self) -> u64 { self.rc } + fn rc_mut(&mut self) -> &mut u64 { &mut self.rc } + fn needs_resolve(&self) -> bool { self.attr.is_none() } +} + +/// Free function — usable by both MescloudICache and resolvers. 
+pub fn make_common_file_attr( + ino: Inode, perm: u16, atime: SystemTime, mtime: SystemTime, + fs_owner: (u32, u32), block_size: u32, +) -> CommonFileAttr { + CommonFileAttr { + ino, atime, mtime, + ctime: SystemTime::UNIX_EPOCH, + crtime: SystemTime::UNIX_EPOCH, + perm: Permissions::from_bits_truncate(perm), + nlink: 1, + uid: fs_owner.0, + gid: fs_owner.1, + blksize: block_size, + } +} + +pub fn blocks_of_size(block_size: u32, size: u64) -> u64 { + size.div_ceil(u64::from(block_size)) +} + +pub struct MescloudICache> { + inner: AsyncICache, + inode_factory: InodeFactory, + fs_owner: (u32, u32), + block_size: u32, +} +``` + +**Step 2: Implement methods** + +Key methods (all `&self`): + +```rust +impl> MescloudICache { + pub fn new(resolver: R, root_ino: Inode, fs_owner: (u32, u32), block_size: u32) -> Self { ... } + + // Delegated from AsyncICache (async): + pub async fn contains(&self, ino: Inode) -> bool { ... } + pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&InodeControlBlock) -> T) -> Option { ... } + pub async fn get_icb_mut(&self, ino: Inode, f: impl FnOnce(&mut InodeControlBlock) -> T) -> Option { ... } + pub async fn insert_icb(&self, ino: Inode, icb: InodeControlBlock) { ... } + pub async fn entry_or_insert_icb(&self, ino: Inode, factory: impl FnOnce() -> InodeControlBlock, then: impl FnOnce(&mut InodeControlBlock) -> T) -> T { ... } + pub async fn inc_rc(&self, ino: Inode) -> u64 { ... } + pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { ... } + pub async fn get_or_resolve(&self, ino: Inode, then: impl FnOnce(&InodeControlBlock) -> T) -> Result { ... } + + // Delegated (sync): + pub fn allocate_fh(&self) -> FileHandle { ... } + pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { ... } + pub fn inode_count(&self) -> usize { ... } + + // Domain-specific: + pub fn allocate_inode(&self) -> Inode { ... } + pub async fn get_attr(&self, ino: Inode) -> Option { ... 
} + pub async fn cache_attr(&self, ino: Inode, attr: FileAttr) { ... } + pub fn fs_owner(&self) -> (u32, u32) { ... } + pub fn block_size(&self) -> u32 { ... } + pub fn statfs(&self) -> FilesystemStats { ... } + + /// Find an existing child inode by (parent, name), or allocate a new one. + /// If the entry is new, inserts a stub ICB (parent+path set, attr=None). + pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { ... } +} +``` + +Notable changes from old `MescloudICache`: +- `new()` takes a `resolver: R` parameter +- `make_common_file_attr` is now a free function (exported from module) +- `ensure_child_inode` is split: `ensure_child_ino` (finds/allocates + inserts stub) + `get_or_resolve` (resolves via resolver) +- `children` field removed from `InodeControlBlock` +- Constructor uses `get_icb_mut_sync` to set root attr + +**Step 3: Write tests for MescloudICache** + +Create tests using a `TestResolver` that creates simple directory/file ICBs: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + struct TestMescloudResolver { + fs_owner: (u32, u32), + block_size: u32, + } + + impl IcbResolver for TestMescloudResolver { + type Icb = InodeControlBlock; + type Error = String; + + fn resolve( + &self, ino: Inode, stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + let fs_owner = self.fs_owner; + let block_size = self.block_size; + async move { + let stub = stub.ok_or("no stub")?; + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), + }; + Ok(InodeControlBlock { attr: Some(attr), ..stub }) + } + } + } + + // Tests: new_creates_root_with_attr, ensure_child_ino_allocates, + // get_or_resolve_populates_attr, etc. 
+} +``` + +**Step 4: Verify, commit** + +```bash +git add src/fs/mescloud/icache.rs +git commit -m "feat(mescloud): rewrite MescloudICache as generic over IcbResolver" +``` + +--- + +## Task 5: Implement RepoResolver + update RepoFs + +**Files:** +- Modify: `src/fs/mescloud/repo.rs` + +**Step 1: Define RepoResolver** + +```rust +use super::icache::{blocks_of_size, make_common_file_attr, InodeControlBlock, MescloudICache}; +use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver}; + +pub(super) struct RepoResolver { + client: MesaClient, + org_name: String, + repo_name: String, + ref_: String, + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for RepoResolver { + type Icb = InodeControlBlock; + type Error = LookupError; + + fn resolve( + &self, + ino: Inode, + stub: Option, + cache: &AsyncICache, + ) -> impl Future> + Send + where + Self: Sized, + { + // Move data needed by the async block + let client = self.client.clone(); + let org_name = self.org_name.clone(); + let repo_name = self.repo_name.clone(); + let ref_ = self.ref_.clone(); + let fs_owner = self.fs_owner; + let block_size = self.block_size; + + async move { + let stub = stub.expect("RepoResolver requires a stub ICB with parent+path"); + let parent = stub.parent.expect("non-root inodes have parents"); + + // Build repo-relative path by walking parent chain + let file_path = build_path_from_cache(parent, &stub.path, cache).await; + + // Fetch from mesa content API + let content = client + .org(&org_name).repos().at(&repo_name).content() + .get(Some(ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(MesaApiError::from)?; + + let now = std::time::SystemTime::now(); + let attr = match &content { + Content::File(f) => { + let size = f.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: make_common_file_attr(ino, 0o644, now, now, fs_owner, block_size), + size, + blocks: blocks_of_size(block_size, size), + } + } + Content::Symlink(s) => { + let size = 
s.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: make_common_file_attr(ino, 0o644, now, now, fs_owner, block_size), + size, + blocks: blocks_of_size(block_size, size), + } + } + Content::Dir(_) => FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), + }, + }; + + Ok(InodeControlBlock { + parent: stub.parent, + path: stub.path, + rc: stub.rc, + attr: Some(attr), + }) + } + } +} + +/// Walk the parent chain in the cache to build the repo-relative path. +async fn build_path_from_cache( + parent: Inode, + name: &std::path::Path, + cache: &AsyncICache, +) -> Option { + use std::path::PathBuf; + + let mut components = vec![name.to_path_buf()]; + let mut current = parent; + while current != RepoFs::ROOT_INO { + let (path, next_parent) = cache + .get_icb(current, |icb| (icb.path.clone(), icb.parent)) + .await?; + components.push(path); + current = next_parent?; + } + components.reverse(); + let joined: PathBuf = components.iter().collect(); + joined.to_str().map(String::from) +} +``` + +**Step 2: Update RepoFs struct** + +```rust +pub struct RepoFs { + icache: MescloudICache, + readdir_buf: Vec, + open_files: HashMap, +} +``` + +Constructor creates the resolver and passes it to `MescloudICache::new()`. The `client`, `org_name`, `repo_name`, `ref_` move into the resolver. `path_of_inode` and `path_of_child` are removed (path building is now in the resolver + `build_path_from_cache`). + +**Step 3: Update `lookup` to use get_or_resolve** + +```rust +async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { + let ino = self.icache.ensure_child_ino(parent, name).await; + let attr = self.icache.get_or_resolve(ino, |icb| { + icb.attr.expect("resolver should populate attr") + }).await?; + self.icache.inc_rc(ino).await; + Ok(attr) +} +``` + +**Step 4: Update `readdir`** + +Readdir still calls the API directly (batch operation). For each child, uses `ensure_child_ino` + `insert_icb` with full attr. 
Uses `readdir_buf` for return. + +Note: readdir needs the path for the API call. Since `path_of_inode` was removed, add a helper method on `MescloudICache` or use `build_path_from_cache` directly. Actually, `path_of_inode` should stay on `RepoFs` (or become a method that uses the icache). Keep it as an async method that walks the parent chain. + +**Step 5: Update `read`** + +`read` still calls the API directly (data transfer, not metadata caching). Needs `path_of_inode` for the path. + +**Step 6: Update remaining Fs methods** + +- `getattr`: `self.icache.get_attr(ino).await.ok_or(...)` (unchanged pattern) +- `open`: `self.icache.contains(ino).await`, `self.icache.allocate_fh()` +- `forget`: `self.icache.forget(ino, nlookups).await` +- `statfs`: `self.icache.statfs()` + +**Step 7: Verify compilation + commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "feat(mescloud): implement RepoResolver, update RepoFs to use async icache" +``` + +--- + +## Task 6: Implement OrgResolver + update OrgFs + +**Files:** +- Modify: `src/fs/mescloud/org.rs` + +**Step 1: Define OrgResolver** + +```rust +pub(super) struct OrgResolver { + client: MesaClient, + org_name: String, + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for OrgResolver { + type Icb = InodeControlBlock; + type Error = LookupError; + + fn resolve( + &self, ino: Inode, stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + let client = self.client.clone(); + let org_name = self.org_name.clone(); + let fs_owner = self.fs_owner; + let block_size = self.block_size; + + async move { + let stub = stub.expect("OrgResolver requires stub"); + + // Determine if this is a repo or owner dir. + // For now, all org-level inodes are directories. + // Repo validation is done by the caller before get_or_resolve. 
+ let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), + }; + + Ok(InodeControlBlock { attr: Some(attr), ..stub }) + } + } +} +``` + +Note: The OrgResolver creates directory ICBs. Repo validation (the `wait_for_sync` API call) stays in `OrgFs::lookup` as a pre-check before `get_or_resolve`. This keeps the resolver simple and the validation/orchestration logic (creating `RepoFs`, bridge mappings) in `OrgFs`. + +**Step 2: Update OrgFs struct** + +```rust +pub struct OrgFs { + name: String, + client: MesaClient, + icache: MescloudICache, + readdir_buf: Vec, + repo_inodes: HashMap, + owner_inodes: HashMap, + repos: Vec, +} +``` + +**Step 3: Update helper methods (async)** + +- `repo_slot_for_inode` → async (walks parent chain via `get_icb(...).await`) +- `inode_role` → async (calls `repo_slot_for_inode`) +- `ensure_owner_inode` → async (calls icache methods with `.await`) +- `ensure_repo_inode` → async +- `translate_repo_ino_to_org` → async +- `inode_table_get_attr` → async + +**Step 4: Update Fs trait implementations** + +Same patterns as Task 5: add `.await` to icache calls, use `readdir_buf`, update `inode_role(...).await`, etc. 
+ +**Step 5: Verify compilation + commit** + +```bash +git add src/fs/mescloud/org.rs +git commit -m "feat(mescloud): implement OrgResolver, update OrgFs to use async icache" +``` + +--- + +## Task 7: Implement MesaResolver + update MesaFS + +**Files:** +- Modify: `src/fs/mescloud/mod.rs` + +**Step 1: Define MesaResolver** + +```rust +pub(super) struct MesaResolver { + fs_owner: (u32, u32), + block_size: u32, +} + +impl IcbResolver for MesaResolver { + type Icb = InodeControlBlock; + type Error = std::convert::Infallible; + + fn resolve( + &self, ino: Inode, stub: Option, + _cache: &AsyncICache, + ) -> impl Future> + Send { + let fs_owner = self.fs_owner; + let block_size = self.block_size; + async move { + let stub = stub.unwrap_or_else(|| InodeControlBlock { + parent: None, path: "/".into(), rc: 0, attr: None, + }); + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), + }; + Ok(InodeControlBlock { attr: Some(attr), ..stub }) + } + } +} +``` + +**Step 2: Update MesaFS struct** + +```rust +pub struct MesaFS { + icache: MescloudICache, + readdir_buf: Vec, + org_inodes: HashMap, + org_slots: Vec, +} +``` + +**Step 3: Update helper methods + Fs implementations** + +Same patterns: make helper methods async, add `.await`, use `readdir_buf`, update `inode_role(ino).await`, etc. + +**Step 4: Verify full compilation + commit** + +```bash +git add src/fs/mescloud/mod.rs +git commit -m "feat(mescloud): implement MesaResolver, update MesaFS to use async icache" +``` + +--- + +## Task 8: Update common.rs + clean up + +**Files:** +- Modify: `src/fs/mescloud/common.rs` +- Modify: `src/fs/icache/mod.rs` + +**Step 1: Update InodeControlBlock re-export** + +The `children` field was removed. Verify `common.rs` still compiles with the new ICB structure. 
+ +**Step 2: Clean up icache/mod.rs exports** + +Remove dead-code annotations, ensure `AsyncICache`, `IcbResolver`, `IcbLike` are all exported cleanly. + +**Step 3: Run full test suite** + +Run: `cargo test -p git-fs` +Run: `cargo clippy -p git-fs` + +**Step 4: Final commit** + +```bash +git add -A +git commit -m "chore: clean up async icache migration" +``` + +--- + +## Summary of files changed + +| File | Change | +|---|---| +| `src/fs/icache/mod.rs` | Add `needs_resolve()` to `IcbLike`, remove dead-code annotations | +| `src/fs/icache/async_cache.rs` | Update `IcbResolver` trait (stub + cache params), extend `get_or_resolve` for stubs, add `get_icb_mut_sync` | +| `src/fs/icache/inode_factory.rs` | Make atomic (`AtomicU64`, `&self`) | +| `src/fs/mescloud/icache.rs` | Full rewrite: generic `MescloudICache`, `make_common_file_attr` free fn, `ensure_child_ino`, remove `children` from ICB | +| `src/fs/mescloud/repo.rs` | Add `RepoResolver` (fetches from mesa content API), update all methods to async icache | +| `src/fs/mescloud/org.rs` | Add `OrgResolver` (creates directory attrs), update all methods to async icache | +| `src/fs/mescloud/mod.rs` | Add `MesaResolver` (creates static directory attrs), update all methods to async icache | +| `src/fs/mescloud/common.rs` | Update ICB re-export (no `children`) | +| `src/fs/local.rs` | Add `needs_resolve()` to local `InodeControlBlock` | diff --git a/docs/plans/2026-02-03-update-checker.md b/docs/plans/2026-02-03-update-checker.md new file mode 100644 index 0000000..898d65b --- /dev/null +++ b/docs/plans/2026-02-03-update-checker.md @@ -0,0 +1,217 @@ +# Update Checker Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Check at startup whether the user is running the latest released version of git-fs, and warn them if not. + +**Architecture:** Use the `self_update` crate to fetch the latest GitHub release from `mesa-dot-dev/git-fs`. 
Since releases use `canary-{short_sha}` tags (not semver), we embed the git commit SHA at build time and compare it against the latest release tag. If they differ, log an `error!` but continue execution normally. + +**Tech Stack:** `self_update` (GitHub backend), `vergen-gitcl` (build-time git SHA embedding) + +--- + +### Task 1: Add dependencies to Cargo.toml + +**Files:** +- Modify: `Cargo.toml` + +**Step 1: Add `self_update` and `vergen-gitcl` to Cargo.toml** + +Add to `[dependencies]`: +```toml +self_update = { version = "0.42", default-features = false, features = ["rustls"] } +``` + +Add a new section: +```toml +[build-dependencies] +vergen-gitcl = { version = "1", features = ["build"] } +``` + +The `self_update` crate is used to query GitHub releases. We disable default features and enable `rustls` to avoid linking OpenSSL. `vergen-gitcl` embeds the git short SHA at compile time so the binary knows what commit it was built from. + +**Step 2: Create `build.rs` to embed git SHA** + +Create: `build.rs` + +```rust +use vergen_gitcl::{BuildBuilder, Emitter, GitclBuilder}; + +fn main() -> Result<(), Box> { + let build = BuildBuilder::default().build_timestamp(false).build()?; + let gitcl = GitclBuilder::default().sha(true).build()?; + + Emitter::default() + .add_instructions(&build)? + .add_instructions(&gitcl)? + .emit()?; + + Ok(()) +} +``` + +This makes `VERGEN_GIT_SHA` available as an environment variable at compile time. + +**Step 3: Verify it compiles** + +Run: `cargo check` +Expected: Compiles without errors + +**Step 4: Commit** + +```bash +git add Cargo.toml Cargo.lock build.rs +git commit -m "feat: add self_update and vergen-gitcl dependencies" +``` + +--- + +### Task 2: Create `src/updates.rs` with update check logic + +**Files:** +- Create: `src/updates.rs` +- Modify: `src/main.rs` (add `mod updates;`) + +**Step 1: Create `src/updates.rs`** + +```rust +//! Checks whether the running binary is the latest released version. 
+ +use tracing::{error, info}; + +/// The git SHA baked in at compile time by `vergen-gitcl`. +const BUILD_SHA: &str = env!("VERGEN_GIT_SHA"); + +/// Check GitHub for the latest release and warn if this binary is outdated. +/// +/// This function never fails the application — it logs errors and returns. +pub fn check_for_updates() { + let short_sha = &BUILD_SHA[..7.min(BUILD_SHA.len())]; + + let releases = match self_update::backends::github::ReleaseList::configure() + .repo_owner("mesa-dot-dev") + .repo_name("git-fs") + .build() + { + Ok(list) => match list.fetch() { + Ok(releases) => releases, + Err(e) => { + info!("Could not check for updates: {e}"); + return; + } + }, + Err(e) => { + info!("Could not configure update check: {e}"); + return; + } + }; + + let Some(latest) = releases.first() else { + info!("No releases found on GitHub."); + return; + }; + + // Release tags are "canary-{short_sha}". Extract the SHA suffix. + let latest_sha = latest + .version + .strip_prefix("canary-") + .unwrap_or(&latest.version); + + if short_sha == latest_sha { + info!("You are running the latest version ({short_sha})."); + } else { + error!( + "You are running git-fs built from commit {short_sha}, \ + but the latest release is from commit {latest_sha}. 
\ + Please update: https://github.com/mesa-dot-dev/git-fs/releases" + ); + } +} +``` + +**Step 2: Register the module in `src/main.rs`** + +Add `mod updates;` after the existing `mod fs;` line (line 10 of `src/main.rs`): + +```rust +mod app_config; +mod daemon; +mod fs; +mod updates; +``` + +**Step 3: Verify it compiles** + +Run: `cargo check` +Expected: Compiles without errors (there will be a "function never used" warning, which is fine — we call it in the next task) + +**Step 4: Commit** + +```bash +git add src/updates.rs src/main.rs +git commit -m "feat: add update checker module" +``` + +--- + +### Task 3: Call `check_for_updates()` from main + +**Files:** +- Modify: `src/main.rs` + +**Step 1: Add the update check call in `main()`** + +In `src/main.rs`, add the call right after tracing is initialized and before argument parsing (after line 51, before `let args = Args::parse();`): + +```rust +fn main() { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .with_span_events(FmtSpan::ENTER | FmtSpan::CLOSE) + .init(); + + updates::check_for_updates(); + + let args = Args::parse(); + // ... rest of main +``` + +**Step 2: Verify it compiles** + +Run: `cargo check` +Expected: Compiles without errors or warnings + +**Step 3: Run the binary to verify update check works** + +Run: `cargo run -- --help` +Expected: You should see either the "latest version" info log or the "please update" error log (depending on whether your local commit matches the latest release), followed by the normal help output. + +**Step 4: Commit** + +```bash +git add src/main.rs +git commit -m "feat: check for updates on startup" +``` + +--- + +### Task 4: Verify clippy and formatting pass + +**Files:** (no changes expected, just verification) + +**Step 1: Run clippy** + +Run: `cargo clippy -- -D warnings` +Expected: No errors or warnings. 
If there are issues (e.g., the strict lint config may flag `expect_used` or `unwrap_used`), fix them by replacing with match/if-let as needed. + +**Step 2: Run rustfmt** + +Run: `cargo fmt --check` +Expected: No formatting issues. + +**Step 3: Commit any fixes if needed** + +```bash +git add -A +git commit -m "fix: address clippy and formatting issues" +``` diff --git a/docs/plans/2026-02-04-mount-directory-interlock.md b/docs/plans/2026-02-04-mount-directory-interlock.md new file mode 100644 index 0000000..4bdc091 --- /dev/null +++ b/docs/plans/2026-02-04-mount-directory-interlock.md @@ -0,0 +1,185 @@ +# Mount Directory Interlock Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Ensure the mount directory is created (including all parents) before mounting, and return an error if the directory already exists and is non-empty. + +**Architecture:** Add a `prepare_mount_point` function in `daemon.rs` that checks whether the mount directory exists and is non-empty (error), creates it with `create_dir_all` if it doesn't exist (logging via `info!`), or proceeds silently if it exists and is empty. This runs in `daemon::run` before spawning the FUSE session. The existing validation in `app_config.rs` that checks for a parent directory is no longer needed since `create_dir_all` handles the full path. + +**Tech Stack:** Rust std (`std::fs`, `tokio::fs`), `tracing` (`info!`) + +--- + +### Task 1: Add `prepare_mount_point` function to `daemon.rs` + +**Files:** +- Modify: `src/daemon.rs` + +**Step 1: Write the `prepare_mount_point` function** + +Add the following function after the `managed_fuse` module (before `wait_for_exit`), around line 141: + +```rust +/// Prepares the mount point directory. +/// +/// - If the directory exists and is non-empty, returns an error. +/// - If the directory does not exist, creates it (including parents) and logs an info message. 
+/// - If the directory exists and is empty, does nothing. +async fn prepare_mount_point(mount_point: &std::path::Path) -> Result<(), std::io::Error> { + match tokio::fs::read_dir(mount_point).await { + Ok(mut entries) => { + if entries.next_entry().await?.is_some() { + return Err(std::io::Error::new( + std::io::ErrorKind::AlreadyExists, + format!( + "Mount point '{}' already exists and is not empty.", + mount_point.display() + ), + )); + } + Ok(()) + } + Err(e) if e.kind() == std::io::ErrorKind::NotFound => { + tokio::fs::create_dir_all(mount_point).await?; + info!(path = %mount_point.display(), "Created mount point directory."); + Ok(()) + } + Err(e) => Err(e), + } +} +``` + +The logic is: +- `read_dir` succeeds → directory exists. Check if it has any entry; if so, error out. +- `read_dir` fails with `NotFound` → directory doesn't exist. Create it and log. +- `read_dir` fails with another error → propagate it (e.g., permission denied). + +**Step 2: Call `prepare_mount_point` in `daemon::run`** + +In the `run` function (`src/daemon.rs:162`), add the call **before** `ManagedFuse::new`. The function currently looks like: + +```rust +pub async fn run( + config: app_config::Config, + handle: tokio::runtime::Handle, +) -> Result<(), std::io::Error> { + // Spawn the cache if it doesn't exist. + tokio::fs::create_dir_all(&config.cache.path).await?; + + debug!(config = ?config, "Starting git-fs daemon..."); + + let fuse = managed_fuse::ManagedFuse::new(&config); +``` + +Change it to: + +```rust +pub async fn run( + config: app_config::Config, + handle: tokio::runtime::Handle, +) -> Result<(), std::io::Error> { + // Spawn the cache if it doesn't exist. 
+ tokio::fs::create_dir_all(&config.cache.path).await?; + + prepare_mount_point(&config.mount_point).await?; + + debug!(config = ?config, "Starting git-fs daemon..."); + + let fuse = managed_fuse::ManagedFuse::new(&config); +``` + +**Step 3: Add `info` to the tracing imports** + +The file currently imports `use tracing::{debug, error};` at line 5. Change to: + +```rust +use tracing::{debug, error, info}; +``` + +**Step 4: Verify it compiles** + +Run: `cargo check` +Expected: No errors. + +**Step 5: Commit** + +```bash +git add src/daemon.rs +git commit -m "feat: add mount point interlock - create dir or error if non-empty" +``` + +--- + +### Task 2: Remove stale mount_point parent validation from `app_config.rs` + +**Files:** +- Modify: `src/app_config.rs` + +**Step 1: Remove the mount_point parent check** + +In `Config::validate()` (`src/app_config.rs:225`), remove the mount_point parent directory validation block (lines 235-240): + +```rust + // REMOVE THIS BLOCK: + if self.mount_point.parent().is_none() { + errors.push(format!( + "Mount point path '{}' has no parent directory.", + self.mount_point.display() + )); + } +``` + +This check is no longer needed because `prepare_mount_point` in `daemon.rs` now calls `create_dir_all` which handles the full path including all parents. The only path that has no parent is `/`, and that's not a valid mount point for other reasons (the non-empty check will catch it). + +**Step 2: Verify it compiles** + +Run: `cargo check` +Expected: No errors. 
+ +**Step 3: Commit** + +```bash +git add src/app_config.rs +git commit -m "refactor: remove stale mount_point parent validation" +``` + +--- + +### Task 3: Manual smoke test + +**Step 1: Test with a non-existent mount point** + +```bash +# Pick a temp path that doesn't exist +export TEST_MNT=$(mktemp -d)/git-fs-test-mnt +rmdir "$(dirname "$TEST_MNT")" # remove so the full path is gone +cargo run -- --config-path /dev/null run # uses default mount point +# Or set GIT_FS_MOUNT_POINT=$TEST_MNT +``` + +Expected: The directory is created and an `info` log line appears saying "Created mount point directory." + +**Step 2: Test with a non-empty mount point** + +```bash +mkdir -p /tmp/git-fs-nonempty-test +touch /tmp/git-fs-nonempty-test/somefile +GIT_FS_MOUNT_POINT=/tmp/git-fs-nonempty-test cargo run -- run +``` + +Expected: Error message about mount point not being empty. Process exits with an error. + +**Step 3: Test with an existing empty mount point** + +```bash +mkdir -p /tmp/git-fs-empty-test +GIT_FS_MOUNT_POINT=/tmp/git-fs-empty-test cargo run -- run +``` + +Expected: No error about the directory, proceeds to mount normally. + +**Step 4: Clean up** + +```bash +rm -rf /tmp/git-fs-nonempty-test /tmp/git-fs-empty-test +``` diff --git a/docs/plans/2026-02-05-homebrew-tap-formula.md b/docs/plans/2026-02-05-homebrew-tap-formula.md new file mode 100644 index 0000000..152f935 --- /dev/null +++ b/docs/plans/2026-02-05-homebrew-tap-formula.md @@ -0,0 +1,281 @@ +# Homebrew Tap Formula for git-fs (v2 — versioned releases) + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** On each promote-to-latest, create a permanent versioned GitHub release AND update the Homebrew tap with both an updated `git-fs.rb` (latest) and a new versioned `git-fs@{version}.rb` formula. 
+ +**Architecture:** The promote-to-latest workflow gets two new capabilities: (1) it creates a permanent versioned release tag (e.g., `v0.1.1-alpha.1`) in addition to the ephemeral `latest` tag, and (2) it pushes a commit to `mesa-dot-dev/homebrew-tap` that updates `Formula/git-fs.rb` and creates `Formula/git-fs@{version}.rb`. Both formulas point to the permanent versioned release URL. + +**Tech Stack:** Homebrew Ruby formula, GitHub Actions, bash, `gh` CLI + +--- + +### Task 1: Add outputs and versioned release to the promote job + +**Files:** +- Modify: `.github/workflows/promote-to-latest.yml` (the `promote` job only) + +**Step 1: Add `outputs` to the `promote` job** + +The `update-homebrew` job needs the version and tag from `promote`. Add an `outputs` block to the `promote` job so downstream jobs can access them: + +```yaml + promote: + name: Promote canary to latest + runs-on: ubuntu-latest + outputs: + version: ${{ steps.canary.outputs.version }} + base_version: ${{ steps.canary.outputs.base_version }} + tag: ${{ steps.canary.outputs.tag }} + target: ${{ steps.canary.outputs.target }} + steps: + ... +``` + +**Step 2: Extract the base version (strip `+sha`) in the "Find latest canary release" step** + +After the existing `VERSION` extraction (line ~42), add: + +```bash + BASE_VERSION=$(echo "${VERSION}" | sed 's/+.*//') + echo "base_version=${BASE_VERSION}" >> "$GITHUB_OUTPUT" +``` + +**Step 3: Add a new step to create the permanent versioned release** + +After the "Create latest release" step (line ~86), add a new step: + +```yaml + - name: Create versioned release + env: + GH_TOKEN: ${{ github.token }} + VERSION: ${{ steps.canary.outputs.version }} + BASE_VERSION: ${{ steps.canary.outputs.base_version }} + TARGET: ${{ steps.canary.outputs.target }} + run: | + TAG="v${BASE_VERSION}" + if gh release view "${TAG}" &>/dev/null; then + echo "Release ${TAG} already exists, skipping." 
+ exit 0 + fi + gh release create "${TAG}" \ + --title "git-fs ${BASE_VERSION}" \ + --notes "Stable release of git-fs ${BASE_VERSION}." \ + --target "${TARGET}" \ + assets/* +``` + +**Step 4: Commit** + +```bash +git add .github/workflows/promote-to-latest.yml +git commit -m "feat: add versioned release to promote workflow" +``` + +--- + +### Task 2: Rewrite the update-homebrew job + +**Files:** +- Modify: `.github/workflows/promote-to-latest.yml` (the `update-homebrew` job only) + +Replace the entire `update-homebrew` job with the version below. Key changes: +- Reads `base_version` from promote outputs +- Downloads tarball from the versioned release tag (not `latest`) +- Updates `Formula/git-fs.rb` with new `url`, `sha256`, and `version` +- Creates a new `Formula/git-fs@{version}.rb` with the correct Homebrew class name +- Commits both files in a single push + +```yaml + update-homebrew: + name: Update Homebrew formula + needs: [promote] + runs-on: ubuntu-latest + steps: + - name: Download macOS universal tarball + env: + GH_TOKEN: ${{ github.token }} + run: | + TAG="v${{ needs.promote.outputs.base_version }}" + curl -fSL -o git-fs-macos-universal.tar.gz \ + "https://github.com/${{ github.repository }}/releases/download/${TAG}/git-fs-macos-universal.tar.gz" + + - name: Compute SHA256 + id: sha + run: | + SHA=$(sha256sum git-fs-macos-universal.tar.gz | cut -d' ' -f1) + echo "sha256=${SHA}" >> "$GITHUB_OUTPUT" + echo "SHA256: ${SHA}" + + - name: Update tap formulae + env: + TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }} + BASE_VERSION: ${{ needs.promote.outputs.base_version }} + SHA256: ${{ steps.sha.outputs.sha256 }} + run: | + git clone "https://x-access-token:${TAP_TOKEN}@github.com/mesa-dot-dev/homebrew-tap.git" tap + cd tap + + TAG="v${BASE_VERSION}" + URL="https://github.com/mesa-dot-dev/git-fs/releases/download/${TAG}/git-fs-macos-universal.tar.gz" + + # Compute Homebrew class name for versioned formula + # git-fs@0.1.1-alpha.1 → GitFsAT011Alpha1 + 
CLASS_NAME=$(ruby -e " + name = 'git-fs@${BASE_VERSION}' + class_name = name.capitalize + class_name.gsub!(/[-_.\s]([a-zA-Z0-9])/) { \$1.upcase } + class_name.tr!('+', 'x') + class_name.sub!(/(.)@(\d)/, '\1AT\2') + puts class_name + ") + FORMULA_FILE="Formula/git-fs@${BASE_VERSION}.rb" + + # Update Formula/git-fs.rb (latest) + cat > Formula/git-fs.rb << FORMULA + class GitFs < Formula + desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" + homepage "https://github.com/mesa-dot-dev/git-fs" + version "${BASE_VERSION}" + url "${URL}" + sha256 "${SHA256}" + license "MIT" + + depends_on :macos + depends_on cask: "macfuse" + + def install + bin.install "git-fs" + end + + test do + assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) + end + end + FORMULA + + # Create versioned formula (e.g., Formula/git-fs@0.1.1-alpha.1.rb) + cat > "${FORMULA_FILE}" << FORMULA + class ${CLASS_NAME} < Formula + desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" + homepage "https://github.com/mesa-dot-dev/git-fs" + version "${BASE_VERSION}" + url "${URL}" + sha256 "${SHA256}" + license "MIT" + + depends_on :macos + depends_on cask: "macfuse" + + def install + bin.install "git-fs" + end + + test do + assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) + end + end + FORMULA + + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + git add Formula/ + git diff --cached --quiet && echo "No changes to commit" && exit 0 + git commit -m "git-fs ${BASE_VERSION}" + git push +``` + +**Important detail about heredoc indentation:** The `cat > file << FORMULA` heredocs above must produce Ruby files with **no leading indentation** (Homebrew requires the class definition at column 0, methods indented 2 spaces). 
The heredocs in the YAML `run:` block must be written so the output has correct Ruby indentation — i.e., the content lines inside the heredoc should NOT be indented relative to the YAML block. Use `<<-FORMULA` with tab-stripping or write the content flush-left. + +**Step 1: Replace the update-homebrew job in the workflow file** + +Delete lines 102-135 (the current `update-homebrew` job) and replace with the YAML above. + +**Step 2: Verify YAML indentation is correct** + +The `update-homebrew` job must be at the same indent level as `promote` (2 spaces under `jobs:`). + +**Step 3: Commit** + +```bash +git add .github/workflows/promote-to-latest.yml +git commit -m "feat: versioned Homebrew formulae on promote-to-latest" +``` + +--- + +### Task 3: Update the initial formula in the tap repo + +**Files:** +- Modify: `Formula/git-fs.rb` (in `mesa-dot-dev/homebrew-tap` repo) + +The formula currently has no `version` field and points to the `latest` download URL. Update it to match the structure that CI will maintain, so the first CI run doesn't produce a confusing diff. 
+ +**Step 1: Update the formula** + +Push directly to main in the tap repo: + +```bash +cd /tmp +rm -rf homebrew-tap-update +gh repo clone mesa-dot-dev/homebrew-tap homebrew-tap-update +cd homebrew-tap-update +``` + +Overwrite `Formula/git-fs.rb` with: + +```ruby +class GitFs < Formula + desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" + homepage "https://github.com/mesa-dot-dev/git-fs" + version "0.0.0" + url "https://github.com/mesa-dot-dev/git-fs/releases/download/v0.0.0/git-fs-macos-universal.tar.gz" + sha256 "PLACEHOLDER" + license "MIT" + + depends_on :macos + depends_on cask: "macfuse" + + def install + bin.install "git-fs" + end + + test do + assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) + end +end +``` + +**Step 2: Commit and push** + +```bash +git add Formula/git-fs.rb +git commit -m "Add version field to formula template" +git push +``` + +--- + +## Verification + +After all tasks are complete: + +1. Push the workflow changes to `main` in `mesa-dot-dev/git-fs` +2. Run the `Promote to Latest` workflow manually from GitHub Actions +3. Verify a new permanent release `v0.1.1-alpha.1` exists alongside `latest` +4. Verify `mesa-dot-dev/homebrew-tap` has both: + - `Formula/git-fs.rb` — updated with real SHA256 and versioned URL + - `Formula/git-fs@0.1.1-alpha.1.rb` — new file with same content and correct class name +5. Test install: `brew tap mesa-dot-dev/homebrew-tap && brew install git-fs` +6. 
Test versioned install: `brew install mesa-dot-dev/homebrew-tap/git-fs@0.1.1-alpha.1` + +## User install flow + +```bash +# Latest version +brew tap mesa-dot-dev/homebrew-tap +brew install git-fs + +# Specific version +brew install mesa-dot-dev/homebrew-tap/git-fs@0.1.1-alpha.1 +``` diff --git a/docs/plans/2026-02-06-decouple-dcache-icb.md b/docs/plans/2026-02-06-decouple-dcache-icb.md new file mode 100644 index 0000000..da8911f --- /dev/null +++ b/docs/plans/2026-02-06-decouple-dcache-icb.md @@ -0,0 +1,306 @@ +# Decouple DCache and Mescloud InodeControlBlock + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Separate the generic `MescloudDCache` implementation from the mescloud-specific `InodeControlBlock` struct, so that `src/fs/dcache/` contains only generic cache machinery and `src/fs/mescloud/` owns its own ICB definition. + +**Architecture:** Currently `src/fs/dcache/mescloud.rs` conflates two concerns: (1) the `MescloudDCache` wrapper (inode allocation, attr caching, statfs) and (2) the `InodeControlBlock` data structure specific to mescloud filesystems. We will split this file in two: `src/fs/dcache/dcache.rs` gets the `MescloudDCache` (renamed to just keep the module name generic), and `src/fs/mescloud/dcache.rs` gets the `InodeControlBlock`. We also rename `MescloudDCache` to a more generic name since the wrapper is not truly mescloud-specific. + +**Tech Stack:** Rust, no new dependencies. + +--- + +## Analysis: What Lives Where + +### Currently in `src/fs/dcache/mescloud.rs` (246 lines): +1. **`InodeControlBlock`** (lines 19-47) - Mescloud-specific ICB with `parent`, `rc`, `path`, `children`, `attr` fields + `IcbLike` impl +2. **`InodeFactory`** (lines 52-67) - Monotonically increasing inode allocator (private helper) +3. **`MescloudDCache`** (lines 75-241) - Wraps `DCache` with inode allocation, attr caching, child inode management, attr construction, statfs +4. 
**`blocks_of_size`** (line 243) - Utility function used in `repo.rs` + +### Consumers of `InodeControlBlock`: +- `src/fs/mescloud/common.rs:5` - `pub(super) use crate::fs::dcache::mescloud::InodeControlBlock` +- `src/fs/mescloud/mod.rs:17` - `use common::InodeControlBlock` (constructs ICB literals) +- `src/fs/mescloud/org.rs:11` - `use super::common::InodeControlBlock` (constructs ICB literals) +- `src/fs/dcache/mescloud.rs` itself (constructs ICB in `ensure_child_inode`) + +### Key insight: +`MescloudDCache` directly constructs `InodeControlBlock` literals inside `ensure_child_inode()` (line 164). This creates a hard coupling. To decouple, `MescloudDCache` must become generic over its ICB type (using `IcbLike`), or `ensure_child_inode` must be moved/changed. Since `ensure_child_inode` needs `parent`, `attr`, and `children` fields that go beyond `IcbLike`, the cleanest approach is: + +1. Move `InodeControlBlock` to `src/fs/mescloud/dcache.rs` +2. Keep `MescloudDCache` in `src/fs/dcache/` but rename the file to reflect it's a higher-level cache wrapper +3. Add a factory method to `IcbLike` so `MescloudDCache` can construct ICBs generically, OR keep `MescloudDCache` typed to `InodeControlBlock` but import it from the new location + +**Chosen approach:** The simplest correct refactor is: +- Move `InodeControlBlock` + its `IcbLike` impl to `src/fs/mescloud/dcache.rs` +- Keep `MescloudDCache` in `src/fs/dcache/` (rename file from `mescloud.rs` to `dcache.rs`) but make it import `InodeControlBlock` from `src/fs/mescloud::dcache` +- This creates a circular dependency problem: `dcache` depends on `mescloud` and `mescloud` depends on `dcache` + +**Revised approach:** To avoid circular deps, we must make `MescloudDCache` generic. 
Extend `IcbLike` with the additional capabilities that `ensure_child_inode` and attr methods need: + +- Add `parent(&self) -> Option` and `set_parent(&mut self, parent: Option)` to `IcbLike` +- Add `path(&self) -> &Path` to `IcbLike` +- Add `attr(&self) -> Option` and `set_attr(&mut self, attr: Option)` to `IcbLike` +- Add `children(&self) -> Option<&[DirEntry]>` and `children_mut` to `IcbLike` +- Add a new constructor `fn new_child(parent: Inode, path: PathBuf) -> Self` to `IcbLike` + +This is over-engineering. Let's reconsider. + +**Final approach (simplest):** +1. Rename `src/fs/dcache/mescloud.rs` to `src/fs/dcache/dcache.rs` - keep `MescloudDCache`, `InodeFactory`, `blocks_of_size` here +2. Create `src/fs/mescloud/dcache.rs` - move `InodeControlBlock` + `IcbLike` impl here +3. `MescloudDCache` in `src/fs/dcache/dcache.rs` imports `InodeControlBlock` from `crate::fs::mescloud::dcache` +4. **Circular dependency check:** `src/fs/dcache/dcache.rs` imports from `crate::fs::mescloud::dcache` and `src/fs/mescloud/*` imports from `crate::fs::dcache`. In Rust, cross-module imports within the same crate are fine as long as there are no circular `mod` declarations. Since both `dcache` and `mescloud` are siblings under `src/fs/mod.rs`, this works. + +--- + +## Additional Readability Opportunities Found + +1. **`src/fs/mescloud/common.rs`** - The `pub(super) use crate::fs::dcache::mescloud::InodeControlBlock` re-export (line 5) should change to import from the new location (`super::dcache::InodeControlBlock`). + +2. **`blocks_of_size` function** - Currently lives in `src/fs/dcache/mescloud.rs` (line 243) but is only used by `src/fs/mescloud/repo.rs`. It's a mescloud concern, not a generic dcache concern. Move it to `src/fs/mescloud/dcache.rs` alongside `InodeControlBlock`. 
+
+---
+
+## Tasks
+
+### Task 1: Create `src/fs/mescloud/dcache.rs` with `InodeControlBlock`
+
+**Files:**
+- Create: `src/fs/mescloud/dcache.rs`
+- Modify: `src/fs/mescloud/mod.rs` (add `pub mod dcache;` declaration)
+
+**Step 1: Create `src/fs/mescloud/dcache.rs`**
+
+```rust
+//! Mescloud-specific inode control block and helpers.
+
+use crate::fs::dcache::IcbLike;
+use crate::fs::r#trait::{DirEntry, Inode};
+
+/// Inode control block for mescloud filesystem layers (MesaFS, OrgFs, RepoFs).
+pub struct InodeControlBlock {
+    /// The root inode doesn't have a parent.
+    pub parent: Option<Inode>,
+    pub rc: u64,
+    pub path: std::path::PathBuf,
+    pub children: Option<Vec<DirEntry>>,
+    /// Cached file attributes from the last lookup.
+    pub attr: Option<fuser::FileAttr>,
+}
+
+impl IcbLike for InodeControlBlock {
+    fn new_root(path: std::path::PathBuf) -> Self {
+        Self {
+            rc: 1,
+            parent: None,
+            path,
+            children: None,
+            attr: None,
+        }
+    }
+
+    fn rc(&self) -> u64 {
+        self.rc
+    }
+
+    fn rc_mut(&mut self) -> &mut u64 {
+        &mut self.rc
+    }
+}
+
+/// Calculate the number of blocks needed for a given size.
+pub fn blocks_of_size(block_size: u32, size: u64) -> u64 {
+    size.div_ceil(u64::from(block_size))
+}
+```
+
+**Step 2: Add module declaration in `src/fs/mescloud/mod.rs`**
+
+Add `pub mod dcache;` after the existing module declarations (after line 25: `pub mod repo;`).
The new line: + +```rust +pub mod dcache; +``` + +**Step 3: Verify it compiles** + +Run: `cargo check 2>&1 | head -30` +Expected: Compiles (new module exists but isn't consumed yet; existing code still uses old paths) + +**Step 4: Commit** + +```bash +git add src/fs/mescloud/dcache.rs src/fs/mescloud/mod.rs +git commit -m "Add mescloud/dcache.rs with InodeControlBlock and blocks_of_size" +``` + +--- + +### Task 2: Rename `src/fs/dcache/mescloud.rs` to `src/fs/dcache/dcache.rs` and update imports + +**Files:** +- Rename: `src/fs/dcache/mescloud.rs` -> `src/fs/dcache/dcache.rs` +- Modify: `src/fs/dcache/dcache.rs` (remove `InodeControlBlock`, `IcbLike` impl, and `blocks_of_size`; import `InodeControlBlock` from new location) +- Modify: `src/fs/dcache/mod.rs` (change `pub mod mescloud;` to `pub mod dcache;`, update re-exports) + +**Step 1: Rename the file** + +```bash +git mv src/fs/dcache/mescloud.rs src/fs/dcache/dcache.rs +``` + +**Step 2: Update `src/fs/dcache/mod.rs`** + +Replace the full contents with: + +```rust +//! Generic directory cache and inode management primitives. + +mod dcache; +pub mod bridge; +mod table; + +pub use dcache::MescloudDCache; +pub use table::DCache; + +/// Common interface for inode control block types usable with `DCache`. +pub trait IcbLike { + /// Create an ICB with rc=1, the given path, and no children. + fn new_root(path: std::path::PathBuf) -> Self; + fn rc(&self) -> u64; + fn rc_mut(&mut self) -> &mut u64; +} +``` + +Note: `pub mod mescloud` becomes `mod dcache` (private, since consumers access `MescloudDCache` via the re-export). + +**Step 3: Update `src/fs/dcache/dcache.rs`** + +Remove the `InodeControlBlock` struct (lines 19-27), its `IcbLike` impl (lines 29-47), and the `blocks_of_size` function (lines 243-245). 
+ +Replace the import `use super::{DCache, IcbLike};` with: + +```rust +use super::{DCache, IcbLike}; +use crate::fs::mescloud::dcache::InodeControlBlock; +``` + +The `use crate::fs::r#trait::...` import line should drop `Permissions` only if it was exclusively used by `InodeControlBlock`. Check: `Permissions` is still used by `make_common_file_attr` (line 193), so keep it. + +The file should now contain only: `InodeFactory`, `MescloudDCache`, and their impls. No `InodeControlBlock`, no `blocks_of_size`. + +**Step 4: Verify it compiles** + +Run: `cargo check 2>&1 | head -30` +Expected: Compiles successfully + +**Step 5: Commit** + +```bash +git add src/fs/dcache/ +git commit -m "Rename dcache/mescloud.rs to dcache/dcache.rs, import ICB from mescloud" +``` + +--- + +### Task 3: Update `src/fs/mescloud/common.rs` import path + +**Files:** +- Modify: `src/fs/mescloud/common.rs` + +**Step 1: Update the import** + +Change line 5 from: +```rust +pub(super) use crate::fs::dcache::mescloud::InodeControlBlock; +``` +to: +```rust +pub(super) use super::dcache::InodeControlBlock; +``` + +**Step 2: Verify it compiles** + +Run: `cargo check 2>&1 | head -30` +Expected: Compiles successfully + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/common.rs +git commit -m "Update InodeControlBlock import in common.rs to use mescloud::dcache" +``` + +--- + +### Task 4: Update `src/fs/mescloud/repo.rs` import path + +**Files:** +- Modify: `src/fs/mescloud/repo.rs` + +**Step 1: Update the import** + +Change line 17 from: +```rust +use crate::fs::dcache::mescloud::{self as mescloud_dcache, MescloudDCache}; +``` +to: +```rust +use crate::fs::dcache::MescloudDCache; +use super::dcache as mescloud_dcache; +``` + +This keeps the `mescloud_dcache::blocks_of_size` call on line 138 working since `blocks_of_size` now lives in `src/fs/mescloud/dcache.rs`. 
+ +**Step 2: Verify it compiles** + +Run: `cargo check 2>&1 | head -30` +Expected: Compiles successfully + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "Update repo.rs imports to use mescloud::dcache for blocks_of_size" +``` + +--- + +### Task 5: Final verification and cleanup + +**Step 1: Full build check** + +Run: `cargo check 2>&1` +Expected: No errors, no warnings related to our changes + +**Step 2: Verify no remaining references to old path** + +Search for `dcache::mescloud::` across the codebase. Should find zero results (all references now go through the re-export or the new path). + +Run: `grep -r "dcache::mescloud" src/` +Expected: No output + +**Step 3: Verify file structure matches goal** + +``` +src/fs/dcache/ + mod.rs - re-exports DCache, MescloudDCache, IcbLike trait + dcache.rs - MescloudDCache, InodeFactory (imports InodeControlBlock from mescloud) + table.rs - generic DCache + bridge.rs - HashMapBridge + +src/fs/mescloud/ + mod.rs - MesaFS (top-level container) + dcache.rs - InodeControlBlock, blocks_of_size <-- NEW + common.rs - error types, re-exports InodeControlBlock + org.rs - OrgFs (single org) + repo.rs - RepoFs (single repo) +``` + +**Step 4: Commit (if any cleanup was needed)** + +```bash +git add -A +git commit -m "Final cleanup: verify decoupled dcache and mescloud ICB" +``` diff --git a/docs/plans/2026-02-06-default-run-subcommand.md b/docs/plans/2026-02-06-default-run-subcommand.md new file mode 100644 index 0000000..30ee9ae --- /dev/null +++ b/docs/plans/2026-02-06-default-run-subcommand.md @@ -0,0 +1,70 @@ +# Default `run` Subcommand Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Make `run` the default subcommand so `git-fs` works without explicitly typing `run`. + +**Architecture:** Use clap's `Args::command` as `Option` and default to `Run { daemonize: false }` when no subcommand is provided. 
This is the idiomatic clap approach — no external crates or hacks needed.
+
+**Tech Stack:** Rust, clap 4.x (derive)
+
+---
+
+### Task 1: Make `run` the default subcommand
+
+**Files:**
+- Modify: `src/main.rs:30-31` (change `command` field to `Option<Command>`)
+- Modify: `src/main.rs:56` (unwrap_or default to `Run`)
+
+**Step 1: Change the `Args` struct to make `command` optional**
+
+In `src/main.rs`, change the `Args` struct:
+
+```rust
+struct Args {
+    #[arg(
+        short,
+        long,
+        value_parser,
+        help = "Optional path to a mesa config TOML."
+    )]
+    config_path: Option<PathBuf>,
+
+    #[command(subcommand)]
+    command: Option<Command>,
+}
+```
+
+**Step 2: Default to `Run` when no subcommand is given**
+
+In the `main()` function, change:
+
+```rust
+    match args.command {
+```
+
+to:
+
+```rust
+    match args.command.unwrap_or(Command::Run { daemonize: false }) {
+```
+
+**Step 3: Build and verify it compiles**
+
+Run: `cargo check`
+Expected: Compiles with no errors.
+
+**Step 4: Manual smoke test**
+
+Run: `cargo build && ./target/debug/git-fs --help`
+Expected: Help text shows `run` and `reload` as subcommands, but `run` is no longer required.
+
+Run: `cargo build && ./target/debug/git-fs` (without `run`)
+Expected: Behaves the same as `git-fs run` (attempts to start the daemon).
+
+**Step 5: Commit**
+
+```bash
+git add src/main.rs
+git commit -m "MES-707: make run the default subcommand"
+```
diff --git a/docs/plans/2026-02-09-async-icache-resolver.md b/docs/plans/2026-02-09-async-icache-resolver.md
new file mode 100644
index 0000000..e817625
--- /dev/null
+++ b/docs/plans/2026-02-09-async-icache-resolver.md
@@ -0,0 +1,858 @@
+# Async ICache Resolver Refactor Plan
+
+> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task.
+
+**Goal:** Refactor `AsyncICache` so the cache manages the full InFlight lifecycle internally via an `IcbResolver` trait, eliminating the spin-lock race condition and removing manual `mark_inflight`/`complete` calls.
+
+**Architecture:** Replace `Arc<Notify>` with `tokio::sync::watch` channels to eliminate the race condition where a notification fires between cloning the handle and awaiting it. Introduce an `IcbResolver` trait that acts as a "promise" to eventually produce an ICB for a given inode. The cache itself manages the InFlight→Available transition via a new `get_or_resolve` method. The struct becomes `AsyncICache<R>` with the ICB type derived from `R::Icb`. Public `mark_inflight`/`complete` are removed.
+
+**Tech Stack:** Rust 2024, `scc` 3.5.6 (`HashMap`), `tokio::sync::watch`, `std::sync::atomic::AtomicU64`, RPITIT (return-position impl trait in traits)
+
+---
+
+## Key Design Decisions
+
+### `IcbResolver` trait
+
+```rust
+pub trait IcbResolver: Send + Sync {
+    type Icb: IcbLike + Send + Sync;
+    type Error: Send;
+
+    fn resolve(
+        &self,
+        ino: Inode,
+    ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send;
+}
+```
+
+Uses RPITIT (Rust 2024 edition) instead of `#[async_trait]` — no heap allocation.
+
+### `IcbState` with `watch`
+
+```rust
+pub enum IcbState<I> {
+    InFlight(watch::Receiver<()>),
+    Available(I),
+}
+```
+
+The `watch::Sender<()>` is held by the task performing resolution. When the sender is dropped (whether from success or failure), all `Receiver::changed().await` calls wake up with `Err(RecvError)` — guaranteeing no missed notifications.
+
+### `AsyncICache<R>`
+
+```rust
+pub struct AsyncICache<R: IcbResolver> {
+    resolver: R,
+    inode_table: ConcurrentHashMap<Inode, IcbState<R::Icb>>,
+    next_fh: AtomicU64,
+}
+```
+
+Single type parameter. ICB type is `R::Icb`.
+
+### `wait_for_available` — no loop
+
+```rust
+async fn wait_for_available(&self, ino: Inode) -> bool {
+    let rx = self.inode_table.read_async(&ino, |_, s| match s {
+        IcbState::InFlight(rx) => Some(rx.clone()),
+        IcbState::Available(_) => None,
+    }).await;
+
+    match rx {
+        None => false, // key missing
+        Some(None) => true, // Available
+        Some(Some(mut rx)) => {
+            // Wait for sender to signal (or drop)
+            let _ = rx.changed().await;
+            // Re-check: entry should now be Available or removed
+            self.inode_table.read_async(&ino, |_, s|
+                matches!(s, IcbState::Available(_))
+            ).await.unwrap_or(false)
+        }
+    }
+}
+```
+
+No loop. `watch::Receiver::changed()` never misses — if the sender already signaled or was dropped before `.changed().await`, it returns immediately.
+
+### `get_or_resolve` — cache-managed lifecycle
+
+```rust
+pub async fn get_or_resolve<R2>(
+    &self,
+    ino: Inode,
+    then: impl FnOnce(&I) -> R2,
+) -> Result<R2, R::Error> { ... }
+```
+
+1. Check if `Available` → run `then`, return
+2. If `InFlight` → clone receiver, await, re-check, run `then`
+3.
If absent → insert `InFlight(rx)`, call `resolver.resolve(ino).await`, on success upsert `Available`, on error remove entry, wake all waiters either way + +### Removed public API + +- `mark_inflight` — removed (internal only via `get_or_resolve`) +- `complete` — removed (internal only via `get_or_resolve`) + +### Retained public API (unchanged signatures) + +- `new(resolver, root_ino, root_path)` — now takes resolver as first arg +- `allocate_fh(&self) -> FileHandle` +- `inode_count(&self) -> usize` +- `contains(&self, ino) -> bool` (async, awaits InFlight) +- `get_icb(&self, ino, f) -> Option` (async, awaits InFlight) +- `get_icb_mut(&self, ino, f) -> Option` (async, awaits InFlight) +- `insert_icb(&self, ino, icb)` (async) +- `entry_or_insert_icb(&self, ino, factory, then) -> R` (async) +- `inc_rc(&self, ino) -> u64` (async) +- `forget(&self, ino, nlookups) -> Option` (async) +- `for_each(&self, f)` (sync iteration) + +--- + +## Files + +- **Modify:** `src/fs/icache/async_cache.rs` (all tasks) +- **Modify:** `src/fs/icache/mod.rs` (Task 1 only — re-export `IcbResolver`) + +--- + +## Task 1: Add `IcbResolver` trait and make `AsyncICache` generic over resolver + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` +- Modify: `src/fs/icache/mod.rs` + +**Step 1: Write the failing test** + +Add a new `TestResolver` to the test module and update `TestIcb` and test helpers. Replace the first two tests (`contains_returns_true_for_root`, `new_creates_root_entry`) to use the resolver-based constructor. + +Add at the top of `mod tests`: + +```rust +use std::collections::HashMap as StdHashMap; +use std::sync::Mutex; + +struct TestResolver { + /// Pre-loaded responses keyed by inode. 
+
+    responses: Mutex<StdHashMap<Inode, Result<TestIcb, String>>>,
+}
+
+impl TestResolver {
+    fn new() -> Self {
+        Self {
+            responses: Mutex::new(StdHashMap::new()),
+        }
+    }
+
+    fn add(&self, ino: Inode, icb: TestIcb) {
+        self.responses.lock().unwrap().insert(ino, Ok(icb));
+    }
+
+    fn add_err(&self, ino: Inode, err: impl Into<String>) {
+        self.responses.lock().unwrap().insert(ino, Err(err.into()));
+    }
+}
+
+impl IcbResolver for TestResolver {
+    type Icb = TestIcb;
+    type Error = String;
+
+    fn resolve(
+        &self,
+        ino: Inode,
+    ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send {
+        let result = self
+            .responses
+            .lock()
+            .unwrap()
+            .remove(&ino)
+            .unwrap_or_else(|| Err(format!("no response for inode {ino}")));
+        async move { result }
+    }
+}
+
+/// Helper: build a cache with a `TestResolver` at root inode 1.
+fn test_cache() -> AsyncICache<TestResolver> {
+    AsyncICache::new(TestResolver::new(), 1, "/root")
+}
+
+/// Helper: build a cache with a given resolver at root inode 1.
+fn test_cache_with(resolver: TestResolver) -> AsyncICache<TestResolver> {
+    AsyncICache::new(resolver, 1, "/root")
+}
+```
+
+Update the existing `new_creates_root_entry` and `contains_returns_true_for_root` tests to use `test_cache()`:
+
+```rust
+#[tokio::test]
+async fn new_creates_root_entry() {
+    let cache = test_cache();
+    assert_eq!(cache.inode_count(), 1, "should have exactly 1 entry");
+}
+
+#[tokio::test]
+async fn contains_returns_true_for_root() {
+    let cache = test_cache();
+    assert!(cache.contains(1).await, "root should exist");
+}
+```
+
+**Step 2: Run tests to verify they fail**
+
+Run: `cargo test -p git-fs icache::async_cache -- --nocapture`
+Expected: FAIL — `IcbResolver` trait and new `AsyncICache::new` signature don't exist
+
+**Step 3: Implement `IcbResolver` trait and update `AsyncICache`**
+
+At the top of `async_cache.rs`, add the `Future` import and trait definition:
+
+```rust
+use std::future::Future;
+```
+
+```rust
+/// Trait for resolving an inode to its control block. 
+
+///
+/// Implementations act as a "promise" that an ICB will eventually be produced
+/// for a given inode. The cache calls `resolve` when it needs to populate a
+/// missing entry.
+pub trait IcbResolver: Send + Sync {
+    /// The inode control block type this resolver produces.
+    type Icb: IcbLike + Send + Sync;
+    /// Error type returned when resolution fails.
+    type Error: Send;
+
+    /// Resolve an inode to its control block.
+    fn resolve(
+        &self,
+        ino: Inode,
+    ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send;
+}
+```
+
+Change the struct:
+
+```rust
+pub struct AsyncICache<R: IcbResolver> {
+    resolver: R,
+    inode_table: ConcurrentHashMap<Inode, IcbState<R::Icb>>,
+    next_fh: AtomicU64,
+}
+```
+
+Change the `impl` block signature:
+
+```rust
+impl<R: IcbResolver> AsyncICache<R> {
+```
+
+Update the constructor to accept a resolver:
+
+```rust
+pub fn new(resolver: R, root_ino: Inode, root_path: impl Into<PathBuf>) -> Self {
+    let table = ConcurrentHashMap::new();
+    drop(table.insert_sync(
+        root_ino,
+        IcbState::Available(R::Icb::new_root(root_path.into())),
+    ));
+    Self {
+        resolver,
+        inode_table: table,
+        next_fh: AtomicU64::new(1),
+    }
+}
+```
+
+Replace all `I` type references in method signatures/bodies with `R::Icb`. Specifically:
+
+- `IcbState<I>` → already generic, no change needed (the enum stays `IcbState<I>`)
+- In method signatures: `icb: I` → `icb: R::Icb`, `FnOnce(&I)` → `FnOnce(&R::Icb)`, `FnOnce(&mut I)` → `FnOnce(&mut R::Icb)`, `FnOnce() -> I` → `FnOnce() -> R::Icb`, `Option<I>` → `Option<R::Icb>`
+- Remove the `I: IcbLike + Send + Sync` bound from the impl block (it's now derived from `R::Icb`)
+
+Update all existing tests to use `test_cache()` instead of `AsyncICache::<TestIcb>::new(1, "/root")`.
+
+For tests that used `Arc::new(AsyncICache::<TestIcb>::new(...))`, use `Arc::new(test_cache())` instead.
+
+For tests that used `cache.mark_inflight(42).await` and `cache.complete(42, ...)`, keep them compiling for now (they still exist); they'll be removed in Task 4. 
+ +**Step 4: Update `mod.rs`** + +Add `IcbResolver` to the re-exports: + +```rust +#[expect(unused_imports)] +pub use async_cache::AsyncICache; +#[expect(unused_imports)] +pub use async_cache::IcbResolver; +``` + +**Step 5: Run tests to verify they pass** + +Run: `cargo test -p git-fs icache::async_cache -- --nocapture` +Expected: PASS (19 tests) + +**Step 6: Run clippy** + +Run: `cargo clippy -p git-fs -- -D warnings` +Expected: PASS + +**Step 7: Commit** + +```bash +git add src/fs/icache/async_cache.rs src/fs/icache/mod.rs +git commit -m "refactor(icache): add IcbResolver trait, make AsyncICache generic over resolver" +``` + +--- + +## Task 2: Replace `Notify` with `watch` and rewrite `wait_for_available` + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` + +**Step 1: Write the failing test** + +Add a test that validates the race-condition-free behavior — specifically that waiting on an already-completed entry doesn't hang: + +```rust +#[tokio::test] +async fn wait_does_not_miss_signal_on_immediate_complete() { + let cache = Arc::new(test_cache()); + + // Insert InFlight, then immediately complete before anyone waits + let (tx, rx) = tokio::sync::watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + // Complete before any waiter — drop sender to signal + cache + .insert_icb( + 42, + TestIcb { + rc: 1, + path: "/fast".into(), + }, + ) + .await; + drop(tx); + + // This must NOT hang — the signal was already sent + let result = tokio::time::timeout( + std::time::Duration::from_millis(100), + cache.contains(42), + ) + .await; + assert_eq!(result, Ok(true), "should not hang on already-completed entry"); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test -p git-fs icache::async_cache::tests::wait_does_not_miss_signal -- --nocapture` +Expected: FAIL — `IcbState::InFlight` expects `Arc`, not `watch::Receiver` + +**Step 3: Replace `Notify` with `watch` throughout** + +1. 
Change imports: + +Remove: +```rust +use std::sync::Arc; +use tokio::sync::Notify; +``` + +Add: +```rust +use tokio::sync::watch; +``` + +2. Update `IcbState`: + +```rust +pub enum IcbState { + /// Entry is being loaded; waiters clone the receiver and `.changed().await`. + InFlight(watch::Receiver<()>), + /// Entry is ready for use. + Available(I), +} +``` + +3. Rewrite `wait_for_available` (no loop): + +```rust +async fn wait_for_available(&self, ino: Inode) -> bool { + let rx = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::InFlight(rx) => Some(rx.clone()), + IcbState::Available(_) => None, + }) + .await; + + match rx { + None => false, // key missing + Some(None) => true, // Available + Some(Some(mut rx)) => { + // Wait for the resolver to complete (or fail/drop sender). + // changed() returns Err(RecvError) when sender is dropped, + // which is fine — it means resolution finished. + let _ = rx.changed().await; + // Re-check: entry is now Available or was removed on error. + self.inode_table + .read_async(&ino, |_, s| matches!(s, IcbState::Available(_))) + .await + .unwrap_or(false) + } + } +} +``` + +4. Update `mark_inflight` to use `watch`: + +```rust +pub async fn mark_inflight(&self, ino: Inode) -> watch::Sender<()> { + let (tx, rx) = watch::channel(()); + self.inode_table + .upsert_async(ino, IcbState::InFlight(rx)) + .await; + tx +} +``` + +5. Update `complete` to accept `watch::Sender`: + +Actually, `complete` no longer needs the sender — it just upserts Available. The old sender being dropped will notify waiters. But we need the sender to still be alive when `complete` is called to ensure proper sequencing. Simplify: `complete` upserts Available. The caller drops the sender afterward (or it's already dropped). + +```rust +pub async fn complete(&self, ino: Inode, icb: R::Icb) { + self.inode_table + .upsert_async(ino, IcbState::Available(icb)) + .await; + // Waiters wake when the sender (held by caller) is dropped. 
+ // If sender was already dropped, waiters already woke from changed().await Err. +} +``` + +6. Update `entry_or_insert_icb` — replace `Arc::clone(notify)` with `rx.clone()`: + +```rust +IcbState::InFlight(rx) => { + let mut rx = rx.clone(); + drop(occ); // release shard lock before awaiting + let _ = rx.changed().await; +} +``` + +7. Update all tests that use `mark_inflight`/`complete`: + +Replace `let _notify = cache.mark_inflight(42).await;` with `let _tx = cache.mark_inflight(42).await;`. + +The `complete` calls remain the same signature (still takes `ino, icb`), but now the `_tx` being dropped after `complete` signals the waiters. In tests, ensure `_tx` is dropped *after* `complete`: + +```rust +// In tests that use mark_inflight + complete: +let tx = cache.mark_inflight(42).await; +// ... spawn task that waits ... +cache.complete(42, TestIcb { ... }).await; +drop(tx); // signal all waiters +``` + +**Step 4: Run tests to verify they pass** + +Run: `cargo test -p git-fs icache::async_cache -- --nocapture` +Expected: PASS (20 tests — 19 existing + 1 new) + +**Step 5: Run clippy** + +Run: `cargo clippy -p git-fs -- -D warnings` +Expected: PASS + +**Step 6: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "refactor(icache): replace Notify with watch channels, eliminate spin-lock" +``` + +--- + +## Task 3: Add `get_or_resolve` method + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` + +**Step 1: Write the failing tests** + +```rust +#[tokio::test] +async fn get_or_resolve_returns_existing() { + let cache = test_cache(); + cache + .insert_icb(42, TestIcb { rc: 1, path: "/existing".into() }) + .await; + + let path: Result = cache + .get_or_resolve(42, |icb| icb.path.clone()) + .await; + assert_eq!(path, Ok(PathBuf::from("/existing"))); +} + +#[tokio::test] +async fn get_or_resolve_resolves_missing() { + let resolver = TestResolver::new(); + resolver.add(42, TestIcb { rc: 1, path: "/resolved".into() }); + let cache = 
test_cache_with(resolver); + + let path: Result = cache + .get_or_resolve(42, |icb| icb.path.clone()) + .await; + assert_eq!(path, Ok(PathBuf::from("/resolved"))); + // Should now be cached + assert!(cache.contains(42).await); +} + +#[tokio::test] +async fn get_or_resolve_propagates_error() { + let resolver = TestResolver::new(); + resolver.add_err(42, "network error"); + let cache = test_cache_with(resolver); + + let result: Result = cache + .get_or_resolve(42, |icb| icb.path.clone()) + .await; + assert_eq!(result, Err("network error".to_owned())); + // Entry should be cleaned up on error + assert!(!cache.contains(42).await); +} + +#[tokio::test] +async fn get_or_resolve_coalesces_concurrent_requests() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + let resolve_count = Arc::new(AtomicUsize::new(0)); + let resolve_count2 = Arc::clone(&resolve_count); + + // A resolver that counts calls and delays slightly + struct CountingResolver { + count: Arc, + } + impl IcbResolver for CountingResolver { + type Icb = TestIcb; + type Error = String; + fn resolve( + &self, + _ino: Inode, + ) -> impl Future> + Send { + self.count.fetch_add(1, Ordering::SeqCst); + async { + tokio::task::yield_now().await; + Ok(TestIcb { rc: 1, path: "/coalesced".into() }) + } + } + } + + let cache = Arc::new(AsyncICache::new( + CountingResolver { count: resolve_count2 }, + 1, + "/root", + )); + + // Spawn 5 concurrent get_or_resolve for the same inode + let mut handles = Vec::new(); + for _ in 0..5 { + let c = Arc::clone(&cache); + handles.push(tokio::spawn(async move { + c.get_or_resolve(42, |icb| icb.path.clone()).await + })); + } + + for h in handles { + assert_eq!( + h.await.expect("task panicked"), + Ok(PathBuf::from("/coalesced")), + ); + } + + // Resolver should only have been called ONCE (not 5 times) + assert_eq!(resolve_count.load(Ordering::SeqCst), 1, "should coalesce to 1 resolve call"); +} +``` + +**Step 2: Run tests to verify they fail** + +Run: `cargo test -p git-fs 
icache::async_cache -- --nocapture`
+Expected: FAIL — `get_or_resolve` doesn't exist
+
+**Step 3: Implement `get_or_resolve`**
+
+```rust
+/// Look up `ino`. If `Available`, run `then` and return `Ok(R2)`.
+/// If absent, call the resolver to fetch the ICB, cache it, then run `then`.
+/// If another task is already resolving this inode (`InFlight`), wait for it.
+///
+/// Returns `Err(R::Error)` if resolution fails. On error the `InFlight`
+/// entry is removed so subsequent calls can retry.
+pub async fn get_or_resolve<R2>(
+    &self,
+    ino: Inode,
+    then: impl FnOnce(&R::Icb) -> R2,
+) -> Result<R2, R::Error> {
+    use scc::hash_map::Entry;
+
+    // Fast path: already Available
+    {
+        let hit = self
+            .inode_table
+            .read_async(&ino, |_, s| match s {
+                IcbState::Available(icb) => Some(then(icb)),
+                IcbState::InFlight(_) => None,
+            })
+            .await;
+        match hit {
+            Some(Some(r)) => return Ok(r),
+            Some(None) => { /* InFlight — fall through */ }
+            None => { /* absent — fall through */ }
+        }
+    }
+
+    // Try to become the resolver, or wait on existing InFlight
+    let mut then_fn = Some(then);
+    loop {
+        match self.inode_table.entry_async(ino).await {
+            Entry::Occupied(mut occ) => match occ.get_mut() {
+                IcbState::Available(icb) => {
+                    let t = then_fn.take().unwrap_or_else(|| unreachable!());
+                    return Ok(t(icb));
+                }
+                IcbState::InFlight(rx) => {
+                    let mut rx = rx.clone();
+                    drop(occ);
+                    let _ = rx.changed().await;
+                    // Re-check on next loop iteration
+                }
+            },
+            Entry::Vacant(vac) => {
+                // We win the race — install InFlight and resolve
+                let (tx, rx) = watch::channel(());
+                vac.insert_entry(IcbState::InFlight(rx));
+
+                match self.resolver.resolve(ino).await {
+                    Ok(icb) => {
+                        let t = then_fn.take().unwrap_or_else(|| unreachable!());
+                        let result = self
+                            .inode_table
+                            .update_async(&ino, |_, state| {
+                                *state = IcbState::Available(icb);
+                            })
+                            .await;
+                        // If update_async returned None, entry was removed
+                        // between our insert and here (shouldn't happen, but
+                        // handle gracefully). 
+ if result.is_none() { + // Re-read to get the value we just set — but the + // entry was removed, so we need to re-insert. + // This is an edge case that shouldn't occur in + // practice. For safety, drop tx and retry. + drop(tx); + } else { + // Read the now-Available value to run `then` + drop(tx); // wake all waiters + let r = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::Available(icb) => Some(t(icb)), + IcbState::InFlight(_) => None, + }) + .await + .flatten(); + if let Some(r) = r { + return Ok(r); + } + } + // Extremely unlikely fallthrough — retry + } + Err(e) => { + // Remove the InFlight entry + self.inode_table.remove_async(&ino).await; + drop(tx); // wake all waiters — they'll see entry missing + return Err(e); + } + } + } + } + } +} +``` + +> **Note on the loop:** Unlike `wait_for_available`, this loop only iterates if: +> (a) we were waiting on InFlight and it completed — we loop back to read Available, or +> (b) an extremely unlikely race removed our entry — we retry. +> It is NOT a spin-lock: every iteration either returns or awaits a `watch::changed()`. + +**Step 4: Run tests to verify they pass** + +Run: `cargo test -p git-fs icache::async_cache -- --nocapture` +Expected: PASS (23 tests — 20 existing + 4 new, though the coalescing test may need the `CountingResolver` to be defined at module level if inner items with impls are not supported) + +**Step 5: Run clippy** + +Run: `cargo clippy -p git-fs -- -D warnings` +Expected: PASS + +**Step 6: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "feat(icache): add get_or_resolve with automatic InFlight lifecycle management" +``` + +--- + +## Task 4: Remove public `mark_inflight`/`complete`, update tests + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` + +**Step 1: Make `mark_inflight` and `complete` private (or remove)** + +Remove `pub` from `mark_inflight` and `complete`. 
If they are only used by tests, move them into a `#[cfg(test)]` impl block or remove entirely if tests have been updated. + +Check: are `mark_inflight` and `complete` still used by any test? + +Tests that used them: +- `contains_awaits_inflight_then_returns_true` +- `get_icb_awaits_inflight` +- `entry_or_insert_awaits_inflight` +- `for_each_skips_inflight` +- `wait_does_not_miss_signal_on_immediate_complete` + +**Step 2: Rewrite these tests to use `get_or_resolve` instead** + +Replace tests that manually managed InFlight with resolver-based tests: + +```rust +#[tokio::test] +async fn contains_awaits_inflight_then_returns_true() { + let resolver = TestResolver::new(); + resolver.add(42, TestIcb { rc: 1, path: "/test".into() }); + let cache = Arc::new(test_cache_with(resolver)); + + // Trigger resolve in background + let cache2 = Arc::clone(&cache); + let handle = tokio::spawn(async move { + cache2.get_or_resolve(42, |_| ()).await + }); + + handle.await.expect("task panicked").expect("resolve failed"); + assert!(cache.contains(42).await, "should be true after resolve"); +} + +#[tokio::test] +async fn get_icb_awaits_inflight_via_resolver() { + use std::sync::atomic::{AtomicBool, Ordering}; + + let resolver = TestResolver::new(); + resolver.add(42, TestIcb { rc: 1, path: "/loaded".into() }); + let cache = Arc::new(test_cache_with(resolver)); + + // Resolve inode 42 first + let _: Result<(), String> = cache.get_or_resolve(42, |_| ()).await; + + let path = cache.get_icb(42, |icb| icb.path.clone()).await; + assert_eq!(path, Some(PathBuf::from("/loaded"))); +} + +#[tokio::test] +async fn for_each_skips_inflight_via_resolver() { + // Use a resolver that never responds (we'll insert InFlight manually for test) + let cache = test_cache(); + // Directly insert an InFlight entry for testing iteration + let (_tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + + let mut count = 0; + cache.for_each(|_, _| { + count += 1; + 
}); + assert_eq!(count, 1, "only root, not the InFlight entry"); +} +``` + +**Step 3: Remove `mark_inflight` and `complete` methods entirely** + +Delete the `mark_inflight` and `complete` methods from the impl block. Also remove the `// -- InFlight management --` section comment. + +**Step 4: Run tests to verify they pass** + +Run: `cargo test -p git-fs icache::async_cache -- --nocapture` +Expected: PASS + +**Step 5: Run clippy and fmt** + +Run: `cargo clippy -p git-fs -- -D warnings && cargo fmt -p git-fs --check` +Expected: PASS + +**Step 6: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "refactor(icache): remove public mark_inflight/complete, use resolver-driven lifecycle" +``` + +--- + +## Task 5: Clean up `entry_or_insert_icb` InFlight handling + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` + +Now that the cache owns the resolver, `entry_or_insert_icb` should also use the resolver pattern for InFlight entries instead of its own loop. However, `entry_or_insert_icb` serves a different purpose — it's for callers that already have an ICB to insert (factory pattern). The InFlight wait inside it should use the `watch`-based wait (which it does after Task 2). + +**Step 1: Verify `entry_or_insert_icb` uses watch correctly** + +Read the current state and verify: +- InFlight branch clones `rx`, drops the entry, awaits `rx.changed()` +- No `Arc` references remain anywhere + +**Step 2: Audit for any remaining `Notify` or `Arc` imports** + +Search the file for any `Notify`, `Arc`, or `use std::sync::Arc` — remove if unused. 
+ +**Step 3: Run full test suite** + +Run: `cargo test -p git-fs icache::async_cache -- --nocapture` +Expected: PASS + +**Step 4: Run clippy and fmt** + +Run: `cargo clippy -p git-fs -- -D warnings && cargo fmt -p git-fs --check` +Expected: PASS + +**Step 5: Commit (if changes were needed)** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "refactor(icache): clean up remaining Notify references" +``` + +--- + +## Verification + +After all tasks are complete: + +1. **Run all async_cache tests:** `cargo test -p git-fs icache::async_cache -- --nocapture` +2. **Run full test suite:** `cargo test -p git-fs` +3. **Check lints:** `cargo clippy -p git-fs -- -D warnings` +4. **Check formatting:** `cargo fmt -p git-fs --check` + +All commands should pass without errors or warnings. diff --git a/docs/plans/2026-02-09-mesa-dev-migration.md b/docs/plans/2026-02-09-mesa-dev-migration.md new file mode 100644 index 0000000..88f0188 --- /dev/null +++ b/docs/plans/2026-02-09-mesa-dev-migration.md @@ -0,0 +1,469 @@ +# mesa-dev 0.1.1 → 1.8.0 Migration Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Fix all compilation errors caused by upgrading mesa-dev from 0.1.1 to 1.8.0. + +**Architecture:** The mesa-dev crate restructured its API from flat module access (`mesa_dev::Mesa`, `mesa_dev::models::*`, `mesa_dev::error::*`) to a hierarchical client pattern (`MesaClient::builder()...build()`, `client.org().repos().at().content()`) with OpenAPI-generated model types. We need to update 4 files in `src/fs/mescloud/` to use the new imports, builder pattern, navigation API, and model types. 
+
+**Tech Stack:** Rust, mesa-dev 1.8.0, mesa_dev_oapi 1.8.0, thiserror, futures
+
+---
+
+## API Change Summary
+
+| Old (0.1.1) | New (1.8.0) |
+|---|---|
+| `mesa_dev::Mesa` | `mesa_dev::MesaClient` |
+| `mesa_dev::error::MesaError` | `mesa_dev::low_level::apis::Error<T>` (generic per endpoint) |
+| `mesa_dev::models::Repo` | `mesa_dev::models::GetByOrgRepos200ResponseReposInner` (list) / `mesa_dev::models::PostByOrgRepos201Response` (get) |
+| `mesa_dev::models::Content` | `mesa_dev::low_level::content::Content` |
+| `mesa_dev::models::Content::File { size, content, .. }` | `Content::File(f)` where `f.size: f64`, `f.content: Option<String>` |
+| `mesa_dev::models::Content::Dir { entries, .. }` | `Content::Dir(d)` where `d.entries: Vec<DirEntry>` |
+| `mesa_dev::models::DirEntryType::{File, Dir}` | `mesa_dev::low_level::content::DirEntry::{File(_), Symlink(_), Dir(_)}` |
+| `Mesa::builder(api_key).base_url(url).build()` | `MesaClient::builder().with_api_key(key).with_base_path(url).build()` |
+| `client.content(org, repo).get(path, ref_)` | `client.org(org).repos().at(repo).content().get(ref_, path, depth)` |
+| `client.repos(org).get(repo)` | `client.org(org).repos().at(repo).get()` |
+| `client.repos(org).list_all()` | `client.org(org).repos().list(None)` |
+| `Repo.default_branch: String` | `repo.default_branch: Option<String>` |
+| `Repo.name: String` | `repo.name: Option<String>` |
+| `Repo.status: Option` | Field removed (no sync status on model) |
+
+---
+
+### Task 1: Fix error types in `common.rs`
+
+**Files:**
+- Modify: `src/fs/mescloud/common.rs`
+
+The old `mesa_dev::error::MesaError` no longer exists. The new error type `mesa_dev::low_level::apis::Error` is generic (different `T` per endpoint), so we can't use `#[from]`. Change to storing a `String` and use `.map_err()` at call sites. 
+ +**Step 1: Update the three error enums** + +Replace all three occurrences of: +```rust +#[error("remote mesa error: {0}")] +RemoteMesaError(#[from] mesa_dev::error::MesaError), +``` + +With: +```rust +#[error("remote mesa error: {0}")] +RemoteMesaError(String), +``` + +This affects `LookupError` (line 16), `ReadError` (line 65), and `ReadDirError` (line 91). + +**Step 2: Verify the file is self-consistent** + +No further changes needed in `common.rs` — the `From<...> for i32` impls still work since they match on the variant name, not the inner type. + +--- + +### Task 2: Fix client import and builder in `mod.rs` + +**Files:** +- Modify: `src/fs/mescloud/mod.rs` + +**Step 1: Update the import** + +Change line 5 from: +```rust +use mesa_dev::Mesa as MesaClient; +``` +To: +```rust +use mesa_dev::MesaClient; +``` + +**Step 2: Update the builder call** + +Change the client construction in `MesaFS::new()` (lines 70-72) from: +```rust +let client = MesaClient::builder(org_conf.api_key.expose_secret()) + .base_url(MESA_API_BASE_URL) + .build(); +``` +To: +```rust +let client = MesaClient::builder() + .with_api_key(org_conf.api_key.expose_secret()) + .with_base_path(MESA_API_BASE_URL) + .build(); +``` + +--- + +### Task 3: Fix `org.rs` — imports, types, and API calls + +**Files:** +- Modify: `src/fs/mescloud/org.rs` + +**Step 1: Update the import** + +Change line 7 from: +```rust +use mesa_dev::Mesa as MesaClient; +``` +To: +```rust +use mesa_dev::MesaClient; +``` + +**Step 2: Update `wait_for_sync` method** + +The old `Repo` model had a `status` field; the new one does not. Change the method signature and body (lines 282-293). 
+
+From:
+```rust
+async fn wait_for_sync(
+    &self,
+    repo_name: &str,
+) -> Result<mesa_dev::models::Repo, mesa_dev::error::MesaError> {
+    let mut repo = self.client.repos(&self.name).get(repo_name).await?;
+    while repo.status.is_some() {
+        trace!(repo = repo_name, "repo is syncing, waiting...");
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+        repo = self.client.repos(&self.name).get(repo_name).await?;
+    }
+    Ok(repo)
+}
+```
+
+To:
+```rust
+async fn wait_for_sync(
+    &self,
+    repo_name: &str,
+) -> Result<mesa_dev::models::PostByOrgRepos201Response, String> {
+    self.client
+        .org(&self.name)
+        .repos()
+        .at(repo_name)
+        .get()
+        .await
+        .map_err(|e| e.to_string())
+}
+```
+
+**Step 3: Update `lookup` — OrgRoot branch**
+
+In lines 375-381, the `?` on `wait_for_sync` now returns a `String` error. Update to map into `LookupError`:
+
+From:
+```rust
+let repo = self.wait_for_sync(name_str).await?;
+
+let (ino, attr) = self.ensure_repo_inode(
+    name_str,
+    name_str,
+    &repo.default_branch,
+    Self::ROOT_INO,
+);
+```
+
+To:
+```rust
+let repo = self
+    .wait_for_sync(name_str)
+    .await
+    .map_err(LookupError::RemoteMesaError)?;
+
+let default_branch = repo.default_branch.as_deref().unwrap_or("main");
+let (ino, attr) = self.ensure_repo_inode(
+    name_str,
+    name_str,
+    default_branch,
+    Self::ROOT_INO,
+);
+```
+
+**Step 4: Update `lookup` — OwnerDir branch**
+
+Similarly update lines 408-411:
+
+From:
+```rust
+let repo = self.wait_for_sync(&encoded).await?;
+
+let (ino, attr) =
+    self.ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent);
+```
+
+To:
+```rust
+let repo = self
+    .wait_for_sync(&encoded)
+    .await
+    .map_err(LookupError::RemoteMesaError)?;
+
+let default_branch = repo.default_branch.as_deref().unwrap_or("main");
+let (ino, attr) =
+    self.ensure_repo_inode(&encoded, repo_name_str, default_branch, parent);
+```
+
+**Step 5: Update `readdir` — OrgRoot branch**
+
+Change the repo listing (lines 467-478) from:
+```rust
+let repos: Vec<mesa_dev::models::Repo> = self
+    .client
+    .repos(&self.name)
+    .list_all()
+    .try_collect()
+    .await?;
+
+let 
repo_infos: Vec<(String, String)> = repos + .into_iter() + .filter(|r| r.status.is_none()) // skip repos still syncing + .map(|r| (r.name, r.default_branch)) + .collect(); +``` + +To: +```rust +let repos: Vec = self + .client + .org(&self.name) + .repos() + .list(None) + .try_collect() + .await + .map_err(|e| ReadDirError::RemoteMesaError(e.to_string()))?; + +let repo_infos: Vec<(String, String)> = repos + .into_iter() + .filter_map(|r| { + let name = r.name?; + let branch = r.default_branch.unwrap_or_else(|| "main".to_owned()); + Some((name, branch)) + }) + .collect(); +``` + +--- + +### Task 4: Fix `repo.rs` — imports, content API, and pattern matching + +**Files:** +- Modify: `src/fs/mescloud/repo.rs` + +**Step 1: Update imports** + +Change line 9 from: +```rust +use mesa_dev::Mesa as MesaClient; +``` +To: +```rust +use mesa_dev::MesaClient; +use mesa_dev::low_level::content::{Content, DirEntry as MesaDirEntry}; +``` + +**Step 2: Update `lookup` — content API call** + +Change lines 121-125 from: +```rust +let content = self + .client + .content(&self.org_name, &self.repo_name) + .get(file_path.as_deref(), Some(self.ref_.as_str())) + .await?; +``` + +To: +```rust +let content = self + .client + .org(&self.org_name) + .repos() + .at(&self.repo_name) + .content() + .get(Some(self.ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(|e| LookupError::RemoteMesaError(e.to_string()))?; +``` + +Note: parameter order changed from `(path, ref)` to `(ref, path, depth)`. + +**Step 3: Update `lookup` — Content pattern matching** + +Change lines 127-144 from: +```rust +let kind = match &content { + mesa_dev::models::Content::File { .. } => DirEntryType::RegularFile, + mesa_dev::models::Content::Dir { .. } => DirEntryType::Directory, +}; + +let (ino, _) = self.icache.ensure_child_inode(parent, name, kind); + +let now = SystemTime::now(); +let attr = match content { + mesa_dev::models::Content::File { size, .. 
} => FileAttr::RegularFile { + common: self.icache.make_common_file_attr(ino, 0o644, now, now), + size, + blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), + }, + mesa_dev::models::Content::Dir { .. } => FileAttr::Directory { + common: self.icache.make_common_file_attr(ino, 0o755, now, now), + }, +}; +``` + +To: +```rust +let kind = match &content { + Content::File(_) | Content::Symlink(_) => DirEntryType::RegularFile, + Content::Dir(_) => DirEntryType::Directory, +}; + +let (ino, _) = self.icache.ensure_child_inode(parent, name, kind); + +let now = SystemTime::now(); +let attr = match &content { + Content::File(f) | Content::Symlink(f) => { + #[expect(clippy::cast_sign_loss, clippy::cast_possible_truncation)] + let size = f.size as u64; + FileAttr::RegularFile { + common: self.icache.make_common_file_attr(ino, 0o644, now, now), + size, + blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), + } + } + Content::Dir(_) => FileAttr::Directory { + common: self.icache.make_common_file_attr(ino, 0o755, now, now), + }, +}; +``` + +**Step 4: Update `readdir` — content API call** + +Change lines 180-184 from: +```rust +let content = self + .client + .content(&self.org_name, &self.repo_name) + .get(file_path.as_deref(), Some(self.ref_.as_str())) + .await?; +``` + +To: +```rust +let content = self + .client + .org(&self.org_name) + .repos() + .at(&self.repo_name) + .content() + .get(Some(self.ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(|e| ReadDirError::RemoteMesaError(e.to_string()))?; +``` + +**Step 5: Update `readdir` — Content + DirEntry pattern matching** + +Change lines 186-200 from: +```rust +let mesa_entries = match content { + mesa_dev::models::Content::Dir { entries, .. } => entries, + mesa_dev::models::Content::File { .. 
} => return Err(ReadDirError::NotADirectory), +}; + +let collected: Vec<_> = mesa_entries + .into_iter() + .map(|e| { + let kind = match e.entry_type { + mesa_dev::models::DirEntryType::File => DirEntryType::RegularFile, + mesa_dev::models::DirEntryType::Dir => DirEntryType::Directory, + }; + (e.name, kind) + }) + .collect(); +``` + +To: +```rust +let mesa_entries = match content { + Content::Dir(d) => d.entries, + Content::File(_) | Content::Symlink(_) => return Err(ReadDirError::NotADirectory), +}; + +let collected: Vec<(String, DirEntryType)> = mesa_entries + .into_iter() + .filter_map(|e| { + let (name, kind) = match e { + MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), + MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), + MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), + }; + Some((name, kind)) + }) + .collect(); +``` + +The explicit `Vec<(String, DirEntryType)>` annotation resolves the E0282 type inference error on `OsStr::new(name)` at line 206. + +**Step 6: Update `read` — content API call and pattern matching** + +Change lines 271-280 from: +```rust +let content = self + .client + .content(&self.org_name, &self.repo_name) + .get(file_path.as_deref(), Some(self.ref_.as_str())) + .await?; + +let encoded_content = match content { + mesa_dev::models::Content::File { content, .. } => content, + mesa_dev::models::Content::Dir { .. 
} => return Err(ReadError::NotAFile), +}; +``` + +To: +```rust +let content = self + .client + .org(&self.org_name) + .repos() + .at(&self.repo_name) + .content() + .get(Some(self.ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(|e| ReadError::RemoteMesaError(e.to_string()))?; + +let encoded_content = match content { + Content::File(f) | Content::Symlink(f) => { + f.content.unwrap_or_default() + } + Content::Dir(_) => return Err(ReadError::NotAFile), +}; +``` + +--- + +### Task 5: Build and verify + +**Step 1: Run cargo build** + +Run: `cargo build` +Expected: Successful compilation with no errors. + +**Step 2: Fix any remaining warnings or errors** + +If there are clippy warnings about `wildcard_enum_match_arm` or other lints, address them. The project has strict clippy settings (`clippy::all = "deny"`, `clippy::pedantic = "warn"`). + +--- + +### Task 6: Commit + +**Step 1: Stage and commit** + +```bash +git add src/fs/mescloud/common.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs +git commit -m "Migrate mesa-dev from 0.1.1 to 1.8.0 + +Update all API call sites to use the new hierarchical client +pattern (client.org().repos().at().content()), new Content/DirEntry +enums from mesa_dev::low_level::content, and string-based error +wrapping since the error type is now generic per endpoint." +``` diff --git a/docs/plans/2026-02-09-shellcheck-workflow.md b/docs/plans/2026-02-09-shellcheck-workflow.md new file mode 100644 index 0000000..c612a0b --- /dev/null +++ b/docs/plans/2026-02-09-shellcheck-workflow.md @@ -0,0 +1,131 @@ +# ShellCheck GitHub Workflow Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Harden the existing ShellCheck GitHub workflow to lint all shell scripts in the repo (not just `install.sh`), fix existing warnings, and enforce strictest settings. 
+ +**Architecture:** Replace the hardcoded single-file shellcheck invocation with a dynamic `find`-based approach that catches all `.sh` files. Fix existing shellcheck warnings in `tests/docker/entrypoint.sh`. Both scripts use `#!/bin/sh` (POSIX sh), so `--shell=sh` stays appropriate as a default, but we switch to auto-detection so future scripts with different shebangs work correctly. + +**Tech Stack:** GitHub Actions, ShellCheck (pre-installed on `ubuntu-latest`) + +--- + +### Task 1: Fix shellcheck warnings in `tests/docker/entrypoint.sh` + +**Files:** +- Modify: `tests/docker/entrypoint.sh:7` + +**Step 1: Run shellcheck locally to confirm current warnings** + +Run: `shellcheck --shell=sh --severity=style --enable=all --external-sources --format=gcc ./tests/docker/entrypoint.sh` +Expected output: +``` +./tests/docker/entrypoint.sh:7:9: note: Prefer double quoting even when variables don't contain special characters. [SC2248] +./tests/docker/entrypoint.sh:7:9: note: Prefer putting braces around variable references even when not strictly required. 
[SC2250] +``` + +**Step 2: Fix the warnings** + +Change line 7 in `tests/docker/entrypoint.sh` from: +```sh +while [ $elapsed -lt 60 ]; do +``` +to: +```sh +while [ "${elapsed}" -lt 60 ]; do +``` + +**Step 3: Run shellcheck again to verify clean** + +Run: `shellcheck --shell=sh --severity=style --enable=all --external-sources --format=gcc ./tests/docker/entrypoint.sh` +Expected: No output (clean) + +**Step 4: Commit** + +```bash +git add tests/docker/entrypoint.sh +git commit -m "fix: resolve shellcheck warnings in entrypoint.sh" +``` + +--- + +### Task 2: Update the ShellCheck workflow to lint all shell scripts + +**Files:** +- Modify: `.github/workflows/shellcheck.yml` + +**Step 1: Replace the workflow file with the improved version** + +Replace the entire content of `.github/workflows/shellcheck.yml` with: + +```yaml +name: ShellCheck + +on: + push: + branches: [main] + paths: ["**.sh"] + pull_request: + branches: [main] + paths: ["**.sh"] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + shellcheck: + name: ShellCheck + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Collect shell scripts + id: collect + run: | + files=$(find . -name '*.sh' -type f | sort) + if [ -z "${files}" ]; then + echo "No .sh files found" + echo "skip=true" >> "$GITHUB_OUTPUT" + else + echo "Found shell scripts:" + echo "${files}" + echo "skip=false" >> "$GITHUB_OUTPUT" + fi + + - name: Run ShellCheck + if: steps.collect.outputs.skip != 'true' + run: | + shellcheck --version + find . 
-name '*.sh' -type f -print0 \ + | xargs -0 shellcheck \ + --severity=style \ + --enable=all \ + --external-sources \ + --check-sourced \ + --format=gcc +``` + +Key changes from the existing workflow: +- **Finds all `.sh` files dynamically** instead of hardcoding `./install.sh` +- **Removed `--shell=sh`** — lets shellcheck auto-detect from the shebang, so bash scripts (if any are added later) get proper checks too +- **Added `--check-sourced`** — also checks files that are sourced by other scripts +- **Added a skip step** — gracefully handles the (unlikely) case where the paths filter triggers but no `.sh` files exist +- **Uses `-print0` / `xargs -0`** — handles filenames with spaces safely + +**Step 2: Validate the workflow YAML syntax** + +Run: `python3 -c "import yaml; yaml.safe_load(open('.github/workflows/shellcheck.yml'))" && echo "YAML valid"` +Expected: `YAML valid` + +**Step 3: Dry-run the shellcheck command locally to verify it passes** + +Run: `find . -name '*.sh' -type f -print0 | xargs -0 shellcheck --severity=style --enable=all --external-sources --check-sourced --format=gcc` +Expected: No output (clean — since we fixed entrypoint.sh in Task 1) + +**Step 4: Commit** + +```bash +git add .github/workflows/shellcheck.yml +git commit -m "ci: shellcheck all .sh files with strictest settings" +``` diff --git a/docs/plans/2026-02-10-composite-fs-dedup.md b/docs/plans/2026-02-10-composite-fs-dedup.md new file mode 100644 index 0000000..76ce2ed --- /dev/null +++ b/docs/plans/2026-02-10-composite-fs-dedup.md @@ -0,0 +1,670 @@ +# CompositeFs Deduplication Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Eliminate duplicated delegation logic between `MesaFS` (mod.rs) and `OrgFs` (org.rs) by extracting a shared `CompositeFs` struct. 
+ +**Architecture:** Both `MesaFS` and `OrgFs` implement the same "compositing filesystem" pattern: they own a set of child filesystem instances, each with a `HashMapBridge` for inode/fh translation. The delegation code for `open`, `read`, `release`, `forget`, `getattr`, `statfs`, and the inner branches of `lookup`/`readdir` is nearly identical. We extract this into a generic `CompositeFs` struct that holds the shared state and implements all delegation methods, then refactor both types to embed it. + +**Tech Stack:** Rust, async_trait, tokio + +--- + +## Feasibility Analysis + +### Why not a blanket impl? + +The user's original proposal was: + +```rust +trait Subtrait { ... } +impl Fs for T { ... } +``` + +This is **technically possible** but has significant trade-offs: + +1. **Borrow checker friction:** The subtrait needs accessor methods like `composite_mut(&mut self) -> &mut CompositeFs<...>`. All delegation goes through this single `&mut self` borrow, which prevents the split-borrow patterns the current code relies on (e.g., accessing `self.slots[idx].bridge` and `self.icache` simultaneously). Workable, but requires pre-allocating inodes before closure calls and restructuring some APIs. + +2. **Verbose subtrait definition:** The subtrait needs ~6 methods (`composite()`, `composite_mut()`, `delegation_target()`, `handle_root_lookup()`, `handle_root_readdir()`, `on_forget_cleanup()`), each implemented on both types. The net LOC savings vs thin wrappers is modest. + +3. **Indirection cost:** Readers must understand both the `MescloudFs` subtrait and the blanket impl to follow any `Fs` method. With the composition approach, each `Fs` method is a clear 1-2 line delegation. + +### Recommended approach: CompositeFs (composition) + +Extract a `CompositeFs` struct that owns the shared state and implements all delegation methods. `MesaFS` and `OrgFs` embed it and write thin `impl Fs` wrappers. 
This:

+
+- Eliminates ~200 lines of duplicated delegation logic
+- Avoids borrow checker complications (direct field access within CompositeFs)
+- Keeps `impl Fs` on each type readable (1-line delegations + custom root logic)
+- Is a standard Rust composition pattern
+
+---
+
+## Task 1: Create `ChildSlot` and `CompositeFs` structs
+
+**Files:**
+- Create: `src/fs/mescloud/composite.rs`
+- Modify: `src/fs/mescloud/mod.rs` (add `mod composite;`)
+
+**Step 1: Write the `ChildSlot` and `CompositeFs` types**
+
+Create `src/fs/mescloud/composite.rs`:
+
+```rust
+use std::collections::HashMap;
+use std::ffi::OsStr;
+use std::time::SystemTime;
+
+use bytes::Bytes;
+use tracing::{trace, warn};
+
+use crate::fs::icache::bridge::HashMapBridge;
+use crate::fs::icache::{FileTable, IcbResolver};
+use crate::fs::r#trait::{
+    DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile,
+    OpenFlags,
+};
+
+use super::common::InodeControlBlock;
+use super::common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError};
+use super::icache as mescloud_icache;
+use super::icache::MescloudICache;
+
+/// A child filesystem slot: inner filesystem + bidirectional inode/fh bridge.
+pub(super) struct ChildSlot<Inner> {
+    pub inner: Inner,
+    pub bridge: HashMapBridge,
+}
+
+/// Generic compositing filesystem that delegates to child `Inner` filesystems.
+///
+/// Holds the shared infrastructure (icache, file table, readdir buffer, child
+/// slots) and implements all the delegation methods that `MesaFS` and `OrgFs`
+/// previously duplicated.
+pub(super) struct CompositeFs<R, Inner>
+where
+    R: IcbResolver,
+{
+    pub icache: MescloudICache<R>,
+    pub file_table: FileTable,
+    pub readdir_buf: Vec<DirEntry>,
+    /// Maps outer inode → index into `slots` for child-root inodes.
+    pub child_inodes: HashMap<Inode, usize>,
+    pub slots: Vec<ChildSlot<Inner>>,
+}
+```
+
+**Step 2: Verify it compiles**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: PASS (new file is just types, no usage yet)
+
+**Step 3: Commit**
+
+```bash
+git add src/fs/mescloud/composite.rs src/fs/mescloud/mod.rs
+git commit -m "refactor: add CompositeFs and ChildSlot structs for shared delegation"
+```
+
+---
+
+## Task 2: Create `InodeCachePeek` trait
+
+Both `OrgFs` and `RepoFs` expose `inode_table_get_attr()` for parent layers to cache attrs during readdir. Extract this into a trait so `CompositeFs` can call it generically.
+
+**Files:**
+- Modify: `src/fs/mescloud/common.rs` (add trait definition)
+- Modify: `src/fs/mescloud/org.rs` (implement trait, remove ad-hoc method)
+- Modify: `src/fs/mescloud/repo.rs` (implement trait, remove ad-hoc method)
+
+**Step 1: Add the trait to common.rs**
+
+```rust
+/// Allows a parent compositor to peek at cached attrs from a child filesystem.
+#[async_trait::async_trait]
+pub(super) trait InodeCachePeek {
+    async fn peek_attr(&self, ino: Inode) -> Option<FileAttr>;
+}
+```
+
+**Step 2: Implement on OrgFs and RepoFs**
+
+Replace `pub(crate) async fn inode_table_get_attr` on both types with:
+
+```rust
+#[async_trait::async_trait]
+impl InodeCachePeek for OrgFs {
+    async fn peek_attr(&self, ino: Inode) -> Option<FileAttr> {
+        self.icache.get_attr(ino).await
+    }
+}
+```
+
+(Same for RepoFs.)
+
+**Step 3: Update call sites in mod.rs and org.rs**
+
+Replace `.inode_table_get_attr(...)` calls with `.peek_attr(...)`.
+
+**Step 4: Verify**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: PASS
+
+**Step 5: Commit**
+
+```bash
+git add src/fs/mescloud/common.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs src/fs/mescloud/mod.rs
+git commit -m "refactor: extract InodeCachePeek trait from inode_table_get_attr"
+```
+
+---
+
+## Task 3: Implement delegation methods on CompositeFs
+
+**Files:**
+- Modify: `src/fs/mescloud/composite.rs`
+
+Add all the shared delegation methods. These are the methods that were duplicated between `MesaFS` and `OrgFs`.
+
+**Step 1: Add helper methods**
+
+```rust
+impl<R, Inner> CompositeFs<R, Inner>
+where
+    R: IcbResolver,
+    Inner: Fs<
+            LookupError = LookupError,
+            GetAttrError = GetAttrError,
+            OpenError = OpenError,
+            ReadError = ReadError,
+            ReaddirError = ReadDirError,
+            ReleaseError = ReleaseError,
+        > + InodeCachePeek + Send,
+{
+    /// Find the child slot that owns `ino` by walking the parent chain.
+    pub async fn slot_for_inode(&self, ino: Inode) -> Option<usize> {
+        if let Some(&idx) = self.child_inodes.get(&ino) {
+            return Some(idx);
+        }
+        let mut current = ino;
+        loop {
+            let parent = self
+                .icache
+                .get_icb(current, |icb| icb.parent)
+                .await
+                .flatten()?;
+            if let Some(&idx) = self.child_inodes.get(&parent) {
+                return Some(idx);
+            }
+            current = parent;
+        }
+    }
+
+    /// Allocate an outer file handle and map it through the bridge.
+    pub fn alloc_fh(&mut self, slot_idx: usize, inner_fh: FileHandle) -> FileHandle {
+        let fh = self.file_table.allocate();
+        self.slots[slot_idx].bridge.insert_fh(fh, inner_fh);
+        fh
+    }
+
+    /// Translate an inner inode to an outer inode, allocating if needed.
+    /// Also inserts a stub ICB into the outer icache.
+    pub async fn translate_inner_ino(
+        &mut self,
+        slot_idx: usize,
+        inner_ino: Inode,
+        parent_outer_ino: Inode,
+        name: &OsStr,
+    ) -> Inode {
+        let outer_ino = self.slots[slot_idx]
+            .bridge
+            .backward_or_insert_inode(inner_ino, || self.icache.allocate_inode());
+        self.icache
+            .entry_or_insert_icb(
+                outer_ino,
+                || InodeControlBlock {
+                    rc: 0,
+                    path: name.into(),
+                    parent: Some(parent_outer_ino),
+                    attr: None,
+                    children: None,
+                },
+                |_| {},
+            )
+            .await;
+        outer_ino
+    }
+}
+```
+
+**Step 2: Add delegation methods**
+
+```rust
+    // -- Fs delegation methods --
+
+    pub async fn delegated_getattr(
+        &self,
+        ino: Inode,
+    ) -> Result<FileAttr, GetAttrError> {
+        self.icache.get_attr(ino).await.ok_or_else(|| {
+            warn!(ino, "getattr on unknown inode");
+            GetAttrError::InodeNotFound
+        })
+    }
+
+    pub async fn delegated_open(
+        &mut self,
+        ino: Inode,
+        flags: OpenFlags,
+    ) -> Result<OpenFile, OpenError> {
+        let idx = self.slot_for_inode(ino).await.ok_or_else(|| {
+            warn!(ino, "open on inode not belonging to any child");
+            OpenError::InodeNotFound
+        })?;
+        let inner_ino = self.slots[idx]
+            .bridge
+            .forward_or_insert_inode(ino, || unreachable!("open: ino should be mapped"));
+        let inner_open = self.slots[idx].inner.open(inner_ino, flags).await?;
+        let outer_fh = self.alloc_fh(idx, inner_open.handle);
+        trace!(ino, outer_fh, inner_fh = inner_open.handle, "open: assigned file handle");
+        Ok(OpenFile {
+            handle: outer_fh,
+            options: inner_open.options,
+        })
+    }
+
+    pub async fn delegated_read(
+        &mut self,
+        ino: Inode,
+        fh: FileHandle,
+        offset: u64,
+        size: u32,
+        flags: OpenFlags,
+        lock_owner: Option<LockOwner>,
+    ) -> Result<Bytes, ReadError> {
+        let idx = self.slot_for_inode(ino).await.ok_or_else(|| {
+            warn!(ino, "read on inode not belonging to any child");
+            ReadError::InodeNotFound
+        })?;
+        let inner_ino = self.slots[idx]
+            .bridge
+            .forward_or_insert_inode(ino, || unreachable!("read: ino should be mapped"));
+        let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| {
+            warn!(fh, "read: no fh mapping found");
+            ReadError::FileNotOpen
+        })?;
+        self.slots[idx]
+            .inner
+            .read(inner_ino, inner_fh, offset, size, flags, lock_owner)
+            .await
+    }
+
+    pub async fn delegated_release(
+        &mut self,
+        ino: Inode,
+        fh: FileHandle,
+        flags: OpenFlags,
+        flush: bool,
+    ) -> Result<(), ReleaseError> {
+        let idx = self.slot_for_inode(ino).await.ok_or_else(|| {
+            warn!(ino, "release on inode not belonging to any child");
+            ReleaseError::FileNotOpen
+        })?;
+        let inner_ino = self.slots[idx]
+            .bridge
+            .forward_or_insert_inode(ino, || unreachable!("release: ino should be mapped"));
+        let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| {
+            warn!(fh, "release: no fh mapping found");
+            ReleaseError::FileNotOpen
+        })?;
+        let result = self.slots[idx]
+            .inner
+            .release(inner_ino, inner_fh, flags, flush)
+            .await;
+        self.slots[idx].bridge.remove_fh_by_left(fh);
+        trace!(ino, fh, "release: cleaned up fh mapping");
+        result
+    }
+
+    /// Returns `true` if the inode was evicted (rc dropped to zero).
+    pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool {
+        // Propagate forget to inner if applicable.
+        if let Some(idx) = self.slot_for_inode(ino).await {
+            if let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) {
+                self.slots[idx].inner.forget(inner_ino, nlookups).await;
+            }
+        }
+        if self.icache.forget(ino, nlookups).await.is_some() {
+            self.child_inodes.remove(&ino);
+            for slot in &mut self.slots {
+                slot.bridge.remove_inode_by_left(ino);
+            }
+            true
+        } else {
+            false
+        }
+    }
+
+    pub fn delegated_statfs(&self) -> FilesystemStats {
+        self.icache.statfs()
+    }
+
+    /// Delegation branch for lookup (when parent is owned by a child slot).
+    pub async fn delegated_lookup(
+        &mut self,
+        parent: Inode,
+        name: &OsStr,
+    ) -> Result<FileAttr, LookupError> {
+        let idx = self.slot_for_inode(parent).await.ok_or(LookupError::InodeNotFound)?;
+        let inner_parent = self.slots[idx]
+            .bridge
+            .forward_or_insert_inode(parent, || unreachable!("lookup: parent should be mapped"));
+        let inner_attr = self.slots[idx].inner.lookup(inner_parent, name).await?;
+        let inner_ino = inner_attr.common().ino;
+        let outer_ino = self.translate_inner_ino(idx, inner_ino, parent, name).await;
+        let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr);
+        self.icache.cache_attr(outer_ino, outer_attr).await;
+        let rc = self.icache.inc_rc(outer_ino).await;
+        trace!(outer_ino, inner_ino, rc, "lookup: resolved via delegation");
+        Ok(outer_attr)
+    }
+
+    /// Delegation branch for readdir (when ino is owned by a child slot).
+    pub async fn delegated_readdir(
+        &mut self,
+        ino: Inode,
+    ) -> Result<&[DirEntry], ReadDirError> {
+        let idx = self.slot_for_inode(ino).await.ok_or(ReadDirError::InodeNotFound)?;
+        let inner_ino = self.slots[idx]
+            .bridge
+            .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped"));
+        let inner_entries = self.slots[idx].inner.readdir(inner_ino).await?;
+        let inner_entries: Vec<DirEntry> = inner_entries.to_vec();
+
+        let mut outer_entries = Vec::with_capacity(inner_entries.len());
+        for entry in &inner_entries {
+            let outer_child_ino = self
+                .translate_inner_ino(idx, entry.ino, ino, &entry.name)
+                .await;
+            if let Some(inner_attr) = self.slots[idx].inner.peek_attr(entry.ino).await {
+                let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr);
+                self.icache.cache_attr(outer_child_ino, outer_attr).await;
+            }
+            outer_entries.push(DirEntry {
+                ino: outer_child_ino,
+                name: entry.name.clone(),
+                kind: entry.kind,
+            });
+        }
+        self.readdir_buf = outer_entries;
+        Ok(&self.readdir_buf)
+    }
+```
+
+**Step 3: Verify**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: PASS (methods exist but aren't called yet)
+
+**Step 4: Commit**
+
+```bash
+git add src/fs/mescloud/composite.rs
+git commit -m "refactor: implement delegation methods on CompositeFs"
+```
+
+---
+
+## Task 4: Refactor MesaFS to use CompositeFs
+
+**Files:**
+- Modify: `src/fs/mescloud/mod.rs`
+
+**Step 1: Replace MesaFS fields with CompositeFs**
+
+Replace:
+```rust
+pub struct MesaFS {
+    icache: MescloudICache<MesaResolver>,
+    file_table: FileTable,
+    readdir_buf: Vec<DirEntry>,
+    org_inodes: HashMap<Inode, usize>,
+    org_slots: Vec<OrgSlot>,
+}
+```
+
+With:
+```rust
+pub struct MesaFS {
+    composite: CompositeFs<MesaResolver, OrgFs>,
+}
+```
+
+Remove the `OrgSlot` struct (replaced by `ChildSlot`).
+
+**Step 2: Update `MesaFS::new`**
+
+Replace field initialization with `CompositeFs` construction:
+```rust
+pub fn new(orgs: impl Iterator, fs_owner: (u32, u32)) -> Self {
+    let resolver = MesaResolver { fs_owner, block_size: Self::BLOCK_SIZE };
+    Self {
+        composite: CompositeFs {
+            icache: MescloudICache::new(resolver, Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE),
+            file_table: FileTable::new(),
+            readdir_buf: Vec::new(),
+            child_inodes: HashMap::new(),
+            slots: orgs.map(|org_conf| {
+                let client = MesaClient::builder()
+                    .with_api_key(org_conf.api_key.expose_secret())
+                    .with_base_path(MESA_API_BASE_URL)
+                    .build();
+                let org = OrgFs::new(org_conf.name, client, fs_owner);
+                ChildSlot { inner: org, bridge: HashMapBridge::new() }
+            }).collect(),
+        },
+    }
+}
+```
+
+**Step 3: Update helper methods**
+
+- `inode_role`: access `self.composite.child_inodes` instead of `self.org_inodes`
+- `org_slot_for_inode`: replace with `self.composite.slot_for_inode(ino)`
+- `ensure_org_inode`: access `self.composite.icache`, `self.composite.slots[idx]`, `self.composite.child_inodes`
+- `alloc_fh`: remove (use `self.composite.alloc_fh()`)
+- `translate_org_ino_to_mesa`: remove (use `self.composite.translate_inner_ino()`)
+
+**Step 4: Update `impl Fs for MesaFS`**
+
+Replace delegation methods with one-line forwards:
+```rust
+#[async_trait::async_trait] +impl Fs for MesaFS { + type LookupError = LookupError; + type GetAttrError = GetAttrError; + type OpenError = OpenError; + type ReadError = ReadError; + type ReaddirError = ReadDirError; + type ReleaseError = ReleaseError; + + async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { + if parent == Self::ROOT_NODE_INO { + // Root children are orgs — custom logic stays here. + let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; + let org_idx = self.composite.slots.iter() + .position(|s| s.inner.name() == org_name) + .ok_or(LookupError::InodeNotFound)?; + let (ino, attr) = self.ensure_org_inode(org_idx).await; + self.composite.icache.inc_rc(ino).await; + Ok(attr) + } else { + self.composite.delegated_lookup(parent, name).await + } + } + + async fn getattr(&mut self, ino: Inode, _fh: Option) -> Result { + self.composite.delegated_getattr(ino).await + } + + async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { + if ino == Self::ROOT_NODE_INO { + // Root readdir lists orgs — custom logic stays here. + // ... 
(keep existing root readdir logic, using self.composite.*) + } else { + self.composite.delegated_readdir(ino).await + } + } + + async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { + self.composite.delegated_open(ino, flags).await + } + + async fn read(&mut self, ino: Inode, fh: FileHandle, offset: u64, size: u32, flags: OpenFlags, lock_owner: Option) -> Result { + self.composite.delegated_read(ino, fh, offset, size, flags, lock_owner).await + } + + async fn release(&mut self, ino: Inode, fh: FileHandle, flags: OpenFlags, flush: bool) -> Result<(), ReleaseError> { + self.composite.delegated_release(ino, fh, flags, flush).await + } + + async fn forget(&mut self, ino: Inode, nlookups: u64) { + self.composite.delegated_forget(ino, nlookups).await; + } + + async fn statfs(&mut self) -> Result { + Ok(self.composite.delegated_statfs()) + } +} +``` + +**Step 5: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS (all 36 tests) + +**Step 6: Commit** + +```bash +git add src/fs/mescloud/mod.rs +git commit -m "refactor: MesaFS now delegates to CompositeFs" +``` + +--- + +## Task 5: Refactor OrgFs to use CompositeFs + +**Files:** +- Modify: `src/fs/mescloud/org.rs` + +**Step 1: Replace OrgFs fields with CompositeFs** + +Replace: +```rust +pub struct OrgFs { + name: String, + client: MesaClient, + icache: MescloudICache, + file_table: FileTable, + readdir_buf: Vec, + repo_inodes: HashMap, + owner_inodes: HashMap, + repos: Vec, +} +``` + +With: +```rust +pub struct OrgFs { + name: String, + client: MesaClient, + composite: CompositeFs, + /// Maps org-level owner-dir inodes → owner name (github only). + owner_inodes: HashMap, +} +``` + +Remove the `RepoSlot` struct (replaced by `ChildSlot`). 
+ +**Step 2: Update `OrgFs::new`, helper methods, and `impl Fs`** + +Same pattern as Task 4: +- `new`: build `CompositeFs` instead of individual fields +- `inode_role`: check `self.owner_inodes` and `self.composite.child_inodes` +- `repo_slot_for_inode`: replace with `self.composite.slot_for_inode(ino)` +- `ensure_repo_inode`: use `self.composite.icache.*` and `self.composite.slots` +- `alloc_fh`: remove (use `self.composite.alloc_fh()`) +- `translate_repo_ino_to_org`: remove (use `self.composite.translate_inner_ino()`) +- Delegation Fs methods: one-line forwards to `self.composite.*` +- Root/OwnerDir branches: keep custom logic, using `self.composite.*` for icache access + +**Step 3: Update `impl InodeCachePeek for OrgFs`** + +```rust +#[async_trait::async_trait] +impl InodeCachePeek for OrgFs { + async fn peek_attr(&self, ino: Inode) -> Option { + self.composite.icache.get_attr(ino).await + } +} +``` + +**Step 4: Handle `forget` cleanup for `owner_inodes`** + +```rust +async fn forget(&mut self, ino: Inode, nlookups: u64) { + let evicted = self.composite.delegated_forget(ino, nlookups).await; + if evicted { + self.owner_inodes.remove(&ino); + } +} +``` + +**Step 5: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS (all 36 tests) + +**Step 6: Commit** + +```bash +git add src/fs/mescloud/org.rs +git commit -m "refactor: OrgFs now delegates to CompositeFs" +``` + +--- + +## Task 6: Remove code separators + +Per project conventions, remove the `// ------` section separators from `mod.rs` and `org.rs` while we're in these files. + +**Step 1: Remove separators** + +Delete all lines matching `// -----------` in `mod.rs` and `org.rs`. 
+ +**Step 2: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/mod.rs src/fs/mescloud/org.rs +git commit -m "chore: remove section separator comments per project conventions" +``` + +--- + +## Summary of changes + +| File | Change | +|------|--------| +| `src/fs/mescloud/composite.rs` | **NEW** — `ChildSlot`, `CompositeFs` with all delegation methods | +| `src/fs/mescloud/common.rs` | Add `InodeCachePeek` trait | +| `src/fs/mescloud/mod.rs` | Replace `OrgSlot` + duplicated fields/methods with `CompositeFs`, thin `impl Fs` wrappers | +| `src/fs/mescloud/org.rs` | Replace `RepoSlot` + duplicated fields/methods with `CompositeFs`, thin `impl Fs` wrappers | +| `src/fs/mescloud/repo.rs` | Implement `InodeCachePeek`, remove `inode_table_get_attr` | + +**Estimated net LOC change:** Remove ~150-200 lines of duplicated delegation logic, add ~120 lines of `CompositeFs` (shared once). Net reduction ~30-80 lines with much less duplication. diff --git a/docs/plans/2026-02-10-file-table.md b/docs/plans/2026-02-10-file-table.md new file mode 100644 index 0000000..04ac28c --- /dev/null +++ b/docs/plans/2026-02-10-file-table.md @@ -0,0 +1,280 @@ +# FileTable: Extract File Handle Management from ICache + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Extract file handle allocation into a dedicated `FileTable` type so that icaches are no longer responsible for file handle management. + +**Architecture:** Currently both `ICache` (sync) and `AsyncICache` (async) embed a monotonic file handle counter (`next_fh`). This couples inode caching with file handle allocation — two unrelated concerns. 
We introduce `FileTable`, a standalone atomic counter (mirroring `InodeFactory` for inodes), owned directly by each filesystem (`MesaFS`, `OrgFs`, `RepoFs`) rather than by the icache layer. + +**Tech Stack:** Rust, `std::sync::atomic::AtomicU64` + +--- + +### Task 1: Create `FileTable` type + +**Files:** +- Create: `src/fs/icache/file_table.rs` +- Modify: `src/fs/icache/mod.rs` + +**Step 1: Write the test** + +Add to `src/fs/icache/file_table.rs`: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn allocate_returns_monotonic_handles() { + let ft = FileTable::new(); + assert_eq!(ft.allocate(), 1); + assert_eq!(ft.allocate(), 2); + assert_eq!(ft.allocate(), 3); + } +} +``` + +**Step 2: Write the implementation** + +Create `src/fs/icache/file_table.rs`: + +```rust +use std::sync::atomic::{AtomicU64, Ordering}; + +use crate::fs::r#trait::FileHandle; + +/// Monotonically increasing file handle allocator. +pub struct FileTable { + next_fh: AtomicU64, +} + +impl FileTable { + pub fn new() -> Self { + Self { + next_fh: AtomicU64::new(1), + } + } + + pub fn allocate(&self) -> FileHandle { + self.next_fh.fetch_add(1, Ordering::Relaxed) + } +} +``` + +**Step 3: Register the module and export** + +In `src/fs/icache/mod.rs`, add: +- `mod file_table;` (private module, like `inode_factory`) +- `pub use file_table::FileTable;` + +**Step 4: Run tests to verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS — new type compiles and test passes. 
+ +**Step 5: Commit** + +```bash +git add src/fs/icache/file_table.rs src/fs/icache/mod.rs +git commit -m "feat: add FileTable type for file handle allocation" +``` + +--- + +### Task 2: Remove `allocate_fh` from `AsyncICache` + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` + +**Step 1: Remove the `next_fh` field from the struct** + +Remove from `AsyncICache`: +```rust +next_fh: AtomicU64, +``` + +**Step 2: Remove `allocate_fh` method** + +Remove: +```rust +pub fn allocate_fh(&self) -> FileHandle { + self.next_fh.fetch_add(1, Ordering::Relaxed) +} +``` + +**Step 3: Remove `next_fh` initialization from `new()`** + +Remove from `AsyncICache::new()`: +```rust +next_fh: AtomicU64::new(1), +``` + +**Step 4: Remove the `allocate_fh_increments` test** + +Remove the entire test block. + +**Step 5: Clean up unused imports** + +Remove `AtomicU64` from `use std::sync::atomic::{AtomicU64, Ordering};` if only used in test code. Remove `FileHandle` from `use crate::fs::r#trait::{FileHandle, Inode};` if no longer used. + +**Do NOT run verify yet** — Tasks 2–5 must all land together. + +--- + +### Task 3: Remove `allocate_fh` from `ICache` (sync) + +**Files:** +- Modify: `src/fs/icache/cache.rs` + +**Step 1: Remove the `next_fh` field** + +Remove from `ICache`: +```rust +next_fh: FileHandle, +``` + +**Step 2: Remove `allocate_fh` method** + +Remove: +```rust +pub fn allocate_fh(&mut self) -> FileHandle { + let fh = self.next_fh; + self.next_fh += 1; + fh +} +``` + +**Step 3: Remove `next_fh` from constructor** + +Remove from `ICache::new()`: +```rust +next_fh: 1, +``` + +**Step 4: Clean up unused imports** + +Remove `FileHandle` from `use crate::fs::r#trait::{FileHandle, Inode};` if no longer needed. 
+ +**Do NOT run verify yet.** + +--- + +### Task 4: Remove `allocate_fh` from `MescloudICache` + +**Files:** +- Modify: `src/fs/mescloud/icache.rs` + +**Step 1: Remove the `allocate_fh` delegation** + +Remove entirely: +```rust +pub fn allocate_fh(&self) -> FileHandle { + self.inner.allocate_fh() +} +``` + +**Step 2: Clean up unused imports** + +Remove `FileHandle` from the `use crate::fs::r#trait::` import if no longer used in this file. + +**Do NOT run verify yet.** + +--- + +### Task 5: Add `FileTable` to each filesystem + +**Files:** +- Modify: `src/fs/mescloud/mod.rs` (`MesaFS`) +- Modify: `src/fs/mescloud/org.rs` (`OrgFs`) +- Modify: `src/fs/mescloud/repo.rs` (`RepoFs`) + +**Step 1: `MesaFS` — add `FileTable` field** + +Add import: +```rust +use crate::fs::icache::FileTable; +``` + +Add field to `MesaFS`: +```rust +file_table: FileTable, +``` + +Initialize in `MesaFS::new()`: +```rust +file_table: FileTable::new(), +``` + +Change `alloc_fh` to use the file table: +```rust +fn alloc_fh(&mut self, slot_idx: usize, org_fh: FileHandle) -> FileHandle { + let fh = self.file_table.allocate(); + self.org_slots[slot_idx].bridge.insert_fh(fh, org_fh); + fh +} +``` + +**Step 2: `OrgFs` — add `FileTable` field** + +Add import: +```rust +use crate::fs::icache::FileTable; +``` + +Add field to `OrgFs`: +```rust +file_table: FileTable, +``` + +Initialize in `OrgFs::new()`: +```rust +file_table: FileTable::new(), +``` + +Change `alloc_fh` to use the file table: +```rust +fn alloc_fh(&mut self, slot_idx: usize, repo_fh: FileHandle) -> FileHandle { + let fh = self.file_table.allocate(); + self.repos[slot_idx].bridge.insert_fh(fh, repo_fh); + fh +} +``` + +**Step 3: `RepoFs` — add `FileTable` field** + +Add import: +```rust +use crate::fs::icache::FileTable; +``` + +Add field to `RepoFs`: +```rust +file_table: FileTable, +``` + +Initialize in `RepoFs::new()`: +```rust +file_table: FileTable::new(), +``` + +In `open()`, change: +```rust +let fh = self.icache.allocate_fh(); +``` 
+to: +```rust +let fh = self.file_table.allocate(); +``` + +**Step 4: Run the full verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All PASS. + +**Step 5: Commit** + +```bash +git add src/fs/icache/async_cache.rs src/fs/icache/cache.rs src/fs/mescloud/icache.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs +git commit -m "refactor: move file handle allocation from icaches to FileTable on each filesystem" +``` diff --git a/docs/plans/2026-02-10-pr31-async-icache-bug-review.md b/docs/plans/2026-02-10-pr31-async-icache-bug-review.md new file mode 100644 index 0000000..978ec49 --- /dev/null +++ b/docs/plans/2026-02-10-pr31-async-icache-bug-review.md @@ -0,0 +1,526 @@ +# PR #31 Async ICache Bug Review + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Fix bugs identified in the async icache PR before merge. + +**Architecture:** The PR replaces a synchronous `HashMap`-based `ICache` with `AsyncICache` built on `scc::HashMap` + `tokio::sync::watch` channels, introduces `CompositeFs` for shared FUSE delegation, and adds `IcbResolver` trait for async inode resolution. 
+ +**Tech Stack:** Rust, tokio, scc::HashMap, FUSE (fuser crate) + +--- + +## Summary of Findings + +| # | Severity | Location | Description | +|---|----------|----------|-------------| +| 1 | Critical | `async_cache.rs:289-322` | `upsert_async` after lock drop can resurrect evicted entries | +| 2 | High | `async_cache.rs:133-160` | TOCTOU between `wait_for_available` and read in `get_icb`/`get_icb_mut` | +| 3 | High | `async_cache.rs:96-120` | `wait_for_available` doesn't loop on InFlight→Available→InFlight | +| 4 | High | `repo.rs:58` | `RepoResolver::resolve` panics on missing stub instead of returning error | +| 5 | Medium | `mod.rs:141-153`, `org.rs:200-215` | `inode_role` falls back to Root in release builds, hiding misrouted ops | +| 6 | Medium | `org.rs:126-144`, `mod.rs:179-196` | `ensure_owner_inode`/`ensure_org_inode` attr-missing path doesn't verify ICB exists | +| 7 | Medium | `async_cache.rs:444-451` | `for_each` uses `iter_sync` from async context (scc docs warn against this) | +| 8 | Medium | `Cargo.toml:47` | `reqwest-blocking-client` feature may deadlock in async context | +| 9 | Low | `repo.rs:268` | Redundant `cache_attr` after `get_or_resolve` in `RepoFs::lookup` | +| 10 | Low | `composite.rs:220-222` | `delegated_forget` iterates ALL slots instead of targeted removal | +| 11 | Low | `mod.rs:3` | Commented-out `local` module left behind | + +Notes on findings NOT included above: +- `ensure_child_ino` TOCTOU (duplicate inodes) — currently safe due to `&mut self` serialization on `Fs` trait. Worth a comment but not a live bug. +- `evict_zero_rc_children` non-atomic scan-then-forget — safe because `nlookups=0` makes the forget conditional on rc still being 0. +- `unreachable!()` in `forward_or_insert_inode` closures — safe due to `&mut self`, same reasoning. +- `delegated_forget` unconditionally propagating to inner FS — pre-existing design, correct by FUSE protocol invariant (inner/outer rc move in lockstep). 
+- `needs_resolve()` has no TTL for mutable refs — design concern for future, not a bug for fixed-ref mounts. +- `ensure_child_ino` O(n) scans — performance concern, not a correctness bug. +- `insert_icb` infinite loop on repeated resolution failures — liveness concern, unlikely in practice. + +--- + +### Task 1: Fix Critical — `upsert_async` resurrects evicted entries + +**Files:** +- Modify: `src/fs/icache/async_cache.rs:289-322` (stub resolution path) +- Modify: `src/fs/icache/async_cache.rs:330-349` (vacant path) + +**Problem:** In `get_or_resolve`, the code acquires the entry lock via `entry_async`, replaces `Available(stub)` with `InFlight(rx)`, then **drops the lock** before calling the resolver. After resolution, it writes back with `upsert_async`. Between the drop and the upsert, a concurrent `forget()` can evict the entry. `upsert_async` then **re-inserts** a dead inode — one the kernel has already forgotten. This is a reference count leak that persists until unmount. + +The same bug exists on the error path (lines 311-319): if `fallback.rc() > 0`, `upsert_async` restores the stub, but the entry may have been evicted during resolution. 
+ +**Step 1: Write the failing test** + +Add to the test module in `async_cache.rs`: + +```rust +#[tokio::test] +async fn get_or_resolve_does_not_resurrect_evicted_entry() { + // Resolver that takes long enough for a concurrent forget to run + struct SlowResolver; + impl IcbResolver for SlowResolver { + type Icb = StubIcb; + async fn resolve( + &self, + _ino: Inode, + stub: Option, + _cache: &AsyncICache, + ) -> Result { + tokio::time::sleep(Duration::from_millis(50)).await; + Ok(StubIcb::new_resolved()) + } + } + + let cache = AsyncICache::new(SlowResolver, 1); + // Insert a stub that needs resolution, with rc=1 + cache.insert_icb(2, StubIcb::new_stub()).await; + cache.inc_rc(2); + + let resolve_handle = tokio::spawn({ + let cache_ref = &cache; // won't work directly — use Arc + async move { + cache_ref.get_or_resolve(2, |icb| icb.clone()).await + } + }); + + // Wait for resolution to start, then forget + tokio::time::sleep(Duration::from_millis(10)).await; + cache.forget(2, 1).await; // rc drops to 0, entry evicted + + // Resolution completes — should NOT resurrect the entry + let _ = resolve_handle.await; + assert!(!cache.contains(2), "evicted entry was resurrected"); +} +``` + +Note: this test will need to be adapted to the actual test infrastructure (Arc wrapping, proper StubIcb types). The key invariant being tested is: if an entry is evicted during resolution, the resolved value must not be re-inserted. + +**Step 2: Run test to verify it fails** + +Run: `cargo test --quiet -p git-fs get_or_resolve_does_not_resurrect` +Expected: FAIL — entry is resurrected because `upsert_async` unconditionally inserts. 
+ +**Step 3: Implement the fix** + +Replace `upsert_async` with `entry_async` + conditional insert: + +```rust +// After resolver returns Ok(icb): +// Instead of: +// self.inode_table.upsert_async(ino, IcbState::Available(icb)).await; +// +// Use: +match self.inode_table.entry_async(ino).await { + Entry::Occupied(mut occ) => { + // Entry still exists (InFlight from our resolution) — update it + *occ.get_mut() = IcbState::Available(icb); + } + Entry::Vacant(_) => { + // Entry was evicted during resolution — do NOT resurrect + // The kernel has already forgotten this inode. + tracing::debug!(ino, "resolved inode was evicted during resolution, dropping result"); + } +} +``` + +Apply the same pattern to: +1. The error fallback path (lines 311-319) where `upsert_async` restores the stub +2. The vacant path (lines 340-345) where `upsert_async` stores the first resolution + +**Step 4: Run test to verify it passes** + +Run: `cargo test --quiet -p git-fs get_or_resolve_does_not_resurrect` +Expected: PASS + +**Step 5: Run full test suite** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 6: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "fix(icache): prevent upsert_async from resurrecting evicted entries" +``` + +--- + +### Task 2: Fix High — `wait_for_available` should loop on re-encountering InFlight + +**Files:** +- Modify: `src/fs/icache/async_cache.rs:96-120` + +**Problem:** `wait_for_available` waits once on the watch channel. After waking, it re-reads the entry. If the entry transitioned InFlight→Available→InFlight (another resolution cycle started), the re-read finds InFlight and returns `false` — callers interpret this as "inode does not exist" when it actually does. 
+ +**Step 1: Write the failing test** + +```rust +#[tokio::test] +async fn wait_for_available_retries_on_re_inflight() { + // A resolver that resolves quickly with a stub that still needs_resolve, + // causing a second InFlight cycle when get_or_resolve is called again + // ... (test setup that causes InFlight→Available(stub)→InFlight→Available(resolved)) + // Assert that get_icb returns Some, not None. +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --quiet -p git-fs wait_for_available_retries` +Expected: FAIL + +**Step 3: Implement the fix** + +Wrap `wait_for_available` in a loop: + +```rust +async fn wait_for_available(&self, ino: Inode) -> bool { + loop { + let rx = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::InFlight(rx) => Some(rx.clone()), + IcbState::Available(_) => None, + }) + .await; + + match rx { + None => return false, // key missing + Some(None) => return true, // Available + Some(Some(mut rx)) => { + // Wait for this InFlight to resolve + let _ = rx.changed().await; + // Loop back to re-check — entry might be InFlight again + // from a new resolution cycle, or might be removed + continue; + } + } + } +} +``` + +Also update `get_icb` and `get_icb_mut` to retry when they encounter InFlight after `wait_for_available`: + +```rust +pub async fn get_icb(&self, ino: Inode, f: impl Fn(&R::Icb) -> T) -> Option { + loop { + if !self.wait_for_available(ino).await { + return None; + } + let result = self + .inode_table + .read_async(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, // retry + }) + .await; + match result { + Some(Some(val)) => return Some(val), + Some(None) => continue, // was InFlight, retry + None => return None, // key missing + } + } +} +``` + +**Step 4: Run test to verify it passes** + +Run: `cargo test --quiet -p git-fs wait_for_available_retries` +Expected: PASS + +**Step 5: Run full test suite** + +Run: `cargo fmt --all && cargo 
clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 6: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "fix(icache): loop in wait_for_available and get_icb on re-encountering InFlight" +``` + +--- + +### Task 3: Fix High — `RepoResolver::resolve` panics on missing stub + +**Files:** +- Modify: `src/fs/mescloud/repo.rs:58` + +**Problem:** `RepoResolver::resolve` uses `unreachable!()` when `stub` is `None`. If any code path ever calls `get_or_resolve` for an inode that was never inserted as a stub, the process panics. This should return an error, not crash. + +**Step 1: Implement the fix** + +```rust +// Replace: +let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); + +// With: +let stub = stub.ok_or(LookupError::InodeNotFound)?; +``` + +**Step 2: Run full test suite** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "fix(repo): return error instead of panicking on missing stub in RepoResolver" +``` + +--- + +### Task 4: Fix Medium — `inode_role` falls back to Root in release builds + +**Files:** +- Modify: `src/fs/mescloud/mod.rs:141-153` (MesaFS) +- Modify: `src/fs/mescloud/org.rs:200-215` (OrgFs) + +**Problem:** When an inode can't be classified, `debug_assert!` fires (stripped in release) and the code falls back to `InodeRole::Root`. In release builds, an unclassifiable inode silently gets the root role — `readdir` on it returns the top-level listing, `lookup` tries to match org/repo names. This can happen if the kernel caches an inode past the 1-second TTL and calls getattr/readdir after the cache has forgotten it. 
+ +**Step 1: Implement the fix** + +Change `inode_role` to return `Option`: + +```rust +fn inode_role(&self, ino: Inode) -> Option { + if ino == Self::ROOT_NODE_INO { + return Some(InodeRole::Root); + } + if self.composite.child_inodes.contains_key(&ino) { + return Some(InodeRole::OrgOwned); + } + if self.composite.slot_for_inode(ino).is_some() { + return Some(InodeRole::OrgOwned); + } + None +} +``` + +Update all callers to handle `None` by returning `ENOENT`: + +```rust +let role = self.inode_role(ino).ok_or(LookupError::InodeNotFound)?; +``` + +**Step 2: Run full test suite** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/mod.rs src/fs/mescloud/org.rs +git commit -m "fix(mescloud): return ENOENT for unclassifiable inodes instead of falling back to Root" +``` + +--- + +### Task 5: Fix Medium — `ensure_owner_inode`/`ensure_org_inode` attr-missing path doesn't verify ICB exists + +**Files:** +- Modify: `src/fs/mescloud/org.rs:126-144` +- Modify: `src/fs/mescloud/mod.rs:179-196` + +**Problem:** When the inode exists in the tracking map (`owner_inodes`/`child_inodes`) but the attr is missing from the icache, the code rebuilds the attr and calls `cache_attr`. But if the ICB was evicted entirely, `cache_attr` (which calls `get_icb_mut`) returns `None` and silently does nothing. The caller receives a stale attr with an inode number that the icache doesn't track, leading to subsequent `getattr` failures. 
+
+**Step 1: Implement the fix**
+
+When the attr is missing AND `cache_attr` effectively no-ops, clean up the stale tracking entry and fall through to the allocation path:
+
+```rust
+// In ensure_owner_inode:
+for (&ino, existing_owner) in &self.owner_inodes {
+    if existing_owner == owner {
+        if let Some(attr) = self.composite.icache.get_attr(ino).await {
+            return (ino, attr);
+        }
+        // ICB may have been evicted — check if it still exists
+        if self.composite.icache.contains(ino).await {
+            let now = SystemTime::now();
+            let attr = FileAttr::Directory { /* ... */ };
+            self.composite.icache.cache_attr(ino, attr).await;
+            return (ino, attr);
+        }
+        // ICB was evicted — fall through to allocate a new one
+        break;
+    }
+}
+// ... allocation path (also remove the stale entry from owner_inodes)
+```
+
+Note: the `for` loop borrows `self.owner_inodes` immutably, so the stale entry removal must happen after the loop. Use a separate `stale_ino` variable. (`contains` is async on the cache — remember the `.await`, matching the `get_attr`/`cache_attr` calls above.)
+
+**Step 2: Run full test suite**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: All pass
+
+**Step 3: Commit**
+
+```bash
+git add src/fs/mescloud/org.rs src/fs/mescloud/mod.rs
+git commit -m "fix(mescloud): handle evicted ICBs in ensure_owner_inode/ensure_org_inode"
+```
+
+---
+
+### Task 6: Fix Medium — `for_each` uses `iter_sync` from async context
+
+**Files:**
+- Modify: `src/fs/icache/async_cache.rs:444-451`
+
+**Problem:** The `for_each` method uses `scc::HashMap::scan` (synchronous shard locks) from within `async fn` callers. The scc docs warn against mixing sync and async operations. On a single-threaded tokio runtime this could deadlock; on multi-threaded it causes contention.
+
+**Step 1: Implement the fix**
+
+Replace `for_each` with an async-safe alternative. 
Since `scc::HashMap` provides `scan_async`, use that: + +```rust +pub async fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { + self.inode_table + .scan_async(|k, v| { + if let IcbState::Available(icb) = v { + f(k, icb); + } + }) + .await; +} +``` + +Check if `scan_async` is available in the version of `scc` being used. If not, document the requirement for a multi-threaded runtime in a comment. + +**Step 2: Run full test suite** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 3: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "fix(icache): use scan_async instead of iter_sync for for_each" +``` + +--- + +### Task 7: Fix Medium — `reqwest-blocking-client` may deadlock in async context + +**Files:** +- Modify: `Cargo.toml:47` + +**Problem:** The `reqwest-blocking-client` feature uses blocking HTTP, which can panic or deadlock inside a tokio runtime. The batch span exporter runs its own thread so it's likely fine, but the async client is safer. + +**Step 1: Implement the fix** + +```toml +# Replace: +opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-blocking-client"], optional = true } + +# With: +opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-client"], optional = true } +``` + +**Step 2: Verify it compiles** + +Run: `cargo check --features __otlp_export` +Expected: Compiles without errors. If the OTLP exporter builder API differs for async reqwest, adjust the builder code in `trc.rs` accordingly. 
+
+**Step 3: Run full test suite**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: All pass
+
+**Step 4: Commit**
+
+```bash
+git add Cargo.toml Cargo.lock src/trc.rs
+git commit -m "fix(deps): use async reqwest client for OTLP export to avoid blocking in tokio"
+```
+
+---
+
+### Task 8: Fix Low — Redundant `cache_attr` after `get_or_resolve` in `RepoFs::lookup`
+
+**Files:**
+- Modify: `src/fs/mescloud/repo.rs` (lookup method, around line 268)
+
+**Problem:** `get_or_resolve` already stores the resolved ICB (including attr) via `upsert_async`. The subsequent `cache_attr` reads the ICB back and writes the same attr — two unnecessary shard lock acquisitions.
+
+**Step 1: Implement the fix**
+
+Remove the redundant `cache_attr` call:
+
+```rust
+// In RepoFs::lookup, remove this line:
+self.icache.cache_attr(ino, attr).await;
+```
+
+**Step 2: Run full test suite**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: All pass
+
+**Step 3: Commit**
+
+```bash
+git add src/fs/mescloud/repo.rs
+git commit -m "fix(repo): remove redundant cache_attr after get_or_resolve in lookup"
+```
+
+---
+
+### Task 9: Fix Low — `delegated_forget` iterates ALL slots
+
+**Files:**
+- Modify: `src/fs/mescloud/composite.rs:220-222`
+
+**Problem:** `delegated_forget` already identifies the correct slot via `slot_for_inode` (line 212) but then iterates all slots to remove the inode from bridges. This is wasteful.
+
+**Step 1: Implement the fix**
+
+```rust
+// Replace:
+for slot in &mut self.slots {
+    slot.bridge.remove_inode_by_left(ino);
+}
+
+// With:
+if let Some(idx) = self.slot_for_inode(ino) {
+    self.slots[idx].bridge.remove_inode_by_left(ino);
+}
+```
+
+Note: `slot_for_inode` is already called at line 212 and its result is used for forwarding, so capture that index once and reuse it for the removal rather than calling `slot_for_inode` a second time as the snippet above does. 
Check that `inode_to_slot` has already been updated before this point; if `inode_to_slot.remove` happens at line 219 before the bridge cleanup, we need to capture the index earlier. + +**Step 2: Run full test suite** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All pass + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/composite.rs +git commit -m "fix(composite): target single slot in delegated_forget instead of iterating all" +``` + +--- + +## Findings NOT requiring fixes (acknowledged, documented) + +These are design concerns or latent issues that are safe under current architecture (`&mut self` serialization on `Fs` trait): + +1. **`ensure_child_ino` TOCTOU** — safe due to `&mut self`. Add a `// SAFETY:` comment documenting why. +2. **`unreachable!()` in `forward_or_insert_inode`** — safe due to `&mut self`. Would become bugs if `Fs` changes to `&self`. +3. **`delegated_forget` unconditional propagation to inner FS** — correct by FUSE protocol invariant (inner/outer rc move in lockstep). Pre-existing design. +4. **`needs_resolve()` no TTL for mutable refs** — design concern for future. Not a bug for current fixed-ref mounts. +5. **O(n) linear scans in `ensure_child_ino` and `evict_zero_rc_children`** — performance concern for large repos. Worth optimizing with a parent→children index but not a correctness bug. +6. **`insert_icb` infinite loop on repeated failures** — liveness concern. In practice, resolution should eventually succeed. +7. **Commented-out `local` module** — cleanup task, not a bug. +8. **OTLP only in Ugly mode** — appears intentional. Confirm with author. +9. **`readdir` caches `size: 0` placeholder attrs** — standard FUSE pattern. Subsequent `lookup` triggers real resolution. +10. **`OrgResolver`/`MesaResolver` return `children: Some(vec![])`** — intentional. Org/Mesa layers manage children via `readdir`, not the resolver. 
diff --git a/docs/plans/2026-02-10-pr31-bug-review.md b/docs/plans/2026-02-10-pr31-bug-review.md new file mode 100644 index 0000000..601907e --- /dev/null +++ b/docs/plans/2026-02-10-pr31-bug-review.md @@ -0,0 +1,79 @@ +# PR #31 Bug Review Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:dispatching-parallel-agents to execute this review plan. + +**Goal:** Find bugs in PR #31 (MES-710: Add an async icache) by reviewing every changed file, every domain of concern, and every critical code path. + +**Architecture:** Three-phase review: (1) per-file structural review, (2) cross-cutting domain review, (3) end-to-end code path review. Each phase uses parallel subagents. + +**Tech Stack:** Rust, tokio, scc::HashMap (lock-free concurrent map), FUSE (via fuser crate), watch channels for InFlight signaling. + +--- + +## Phase 1: Per-File Review (parallel agents) + +Each agent reads one file (final state on branch) and the diff, looking for bugs. + +### Agent 1.1: `async_cache.rs` (1210 lines — highest risk) +**File:** `src/fs/icache/async_cache.rs` +**Focus:** Race conditions in InFlight/Available state machine, deadlocks, lost wakeups, `unreachable!` panics, correctness of `forget`/`inc_rc`/`get_or_resolve`. + +### Agent 1.2: `composite.rs` (294 lines) +**File:** `src/fs/mescloud/composite.rs` +**Focus:** Inode/FH translation correctness, bridge cleanup on forget, `readdir_buf` lifetime, `unreachable!` in `forward_or_insert_inode` closures. + +### Agent 1.3: `icache.rs` (mescloud wrapper) +**File:** `src/fs/mescloud/icache.rs` +**Focus:** `needs_resolve()` logic, `evict_zero_rc_children` correctness (iterating while modifying), `ensure_child_ino` O(n) scan, `cache_attr` silent failure. + +### Agent 1.4: `mod.rs` (MesaFS) +**File:** `src/fs/mescloud/mod.rs` +**Focus:** `MesaResolver` always returning `Infallible`, `inode_role` fallback to `Root`, `ensure_org_inode` bridge reset, removed `debug_assert!`s. 
+ +### Agent 1.5: `org.rs` (OrgFs) +**File:** `src/fs/mescloud/org.rs` +**Focus:** `OrgResolver`, `register_repo_slot` orphaned slot handling, `owner_inodes` cleanup on forget, github special casing. + +### Agent 1.6: `repo.rs` (RepoFs) +**File:** `src/fs/mescloud/repo.rs` +**Focus:** `RepoResolver::resolve` with `unreachable!` on missing stub, `build_repo_path` infinite loop potential, `readdir` calling `get_or_resolve` then caching attr again, `path_of_inode` duplication. + +### Agent 1.7: Small files (grouped) +**Files:** `src/fs/icache/file_table.rs`, `src/fs/icache/inode_factory.rs`, `src/fs/icache/mod.rs`, `src/fs/mescloud/common.rs` +**Focus:** Atomic ordering correctness (`Relaxed` for monotonic counters), `IcbLike` requiring `Clone`, error conversion completeness. + +### Agent 1.8: `trc.rs` + `fuser.rs` +**Files:** `src/trc.rs`, `src/fs/fuser.rs` +**Focus:** OTLP shutdown ordering, feature flag correctness, instrument name changes. + +## Phase 2: Domain Review (parallel agents) + +### Agent 2.1: Concurrency & Race Conditions +**Scope:** All files using `AsyncICache`, `scc::HashMap`, `watch` channels +**Focus:** TOCTOU between `wait_for_available` and subsequent `update_async`/`read_async`, ABA problems in InFlight→Available→InFlight transitions, `for_each` + concurrent mutation, deadlock scenarios with nested shard locks. + +### Agent 2.2: FUSE Ref-Counting Correctness +**Scope:** `inc_rc`, `forget`, `lookup` across all filesystem layers +**Focus:** Every `lookup` must `inc_rc` exactly once, every `forget` must propagate to inner FS, `inc_rc` returning `None` must fail the lookup (not silently proceed), ref-count leaks when errors occur after `inc_rc`. + +### Agent 2.3: Error Recovery & Cleanup +**Scope:** `get_or_resolve` error paths, `InFlight` cleanup on resolver failure +**Focus:** Is the `InFlight` entry always removed/restored on error? Does the `watch::Sender` always get dropped? What happens to waiters when resolution fails? 
Are there resource leaks? + +### Agent 2.4: CompositeFs Bridge Consistency +**Scope:** `composite.rs`, `mod.rs`, `org.rs` — all bridge operations +**Focus:** Are `child_inodes`, `inode_to_slot`, and bridge maps kept in sync? Does `delegated_forget` clean up all three? What about `readdir_buf` aliasing? + +## Phase 3: Code Path Review (parallel agents) + +### Agent 3.1: Lookup Path (FUSE → MesaFS → OrgFs → RepoFs) +**Trace:** `FuserAdapter::lookup` → `MesaFS::lookup` → `CompositeFs::delegated_lookup` → `OrgFs::lookup` → `CompositeFs::delegated_lookup` → `RepoFs::lookup` → `RepoResolver::resolve` +**Focus:** Are inodes properly translated at each boundary? Is `inc_rc` called exactly once at each layer? What happens if the inner lookup succeeds but `inc_rc` returns `None`? + +### Agent 3.2: Readdir Path +**Trace:** `FuserAdapter::readdir` → `MesaFS::readdir` → `CompositeFs::delegated_readdir` → `OrgFs::readdir` → `CompositeFs::delegated_readdir` → `RepoFs::readdir` +**Focus:** `readdir_buf` ownership and aliasing, `evict_zero_rc_children` TOCTOU with concurrent lookups, `translate_inner_ino` creating stubs that may conflict with concurrent resolvers. + +### Agent 3.3: Forget/Eviction Path +**Trace:** `FuserAdapter::forget` → `MesaFS::forget` → `CompositeFs::delegated_forget` → `OrgFs::forget` → `CompositeFs::delegated_forget` → `RepoFs::forget` +**Focus:** Does forget propagate correctly through all layers? Is the bridge cleaned up before or after the inner forget? Can forget race with a concurrent lookup that's incrementing rc? diff --git a/docs/plans/2026-02-10-readdir-icache-caching.md b/docs/plans/2026-02-10-readdir-icache-caching.md new file mode 100644 index 0000000..008a6e0 --- /dev/null +++ b/docs/plans/2026-02-10-readdir-icache-caching.md @@ -0,0 +1,572 @@ +# RepoFs readdir icache caching Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. 
+ +**Goal:** Make `RepoFs::readdir` read directory listings from the icache (via the resolver) instead of calling the mesa API on every invocation. + +**Architecture:** Add a `children` field to `InodeControlBlock` storing `Option>`. The `RepoResolver` populates this field when resolving directory inodes (it already calls the content API). `readdir` then calls `get_or_resolve` on the icache, which transparently invokes the resolver on cache miss. `needs_resolve()` is updated to return `true` for directory ICBs that lack children, ensuring directories get fully resolved on first access. For a fixed ref, directory contents are immutable, making this cache always valid. + +**Tech Stack:** Rust, tokio, scc (concurrent HashMap), mesa_dev SDK + +--- + +### Task 1: Add `children` field to `InodeControlBlock` + +**Files:** +- Modify: `src/fs/mescloud/icache.rs:1-39` + +**Step 1: Write a failing test for `needs_resolve()` on directory ICBs** + +Add a `#[cfg(test)]` module at the bottom of `src/fs/mescloud/icache.rs`: + +```rust +#[cfg(test)] +mod tests { + use super::*; + use crate::fs::r#trait::DirEntryType; + + fn dummy_dir_attr(ino: Inode) -> FileAttr { + let now = std::time::SystemTime::now(); + FileAttr::Directory { + common: make_common_file_attr(ino, 0o755, now, now, (0, 0), 4096), + } + } + + fn dummy_file_attr(ino: Inode) -> FileAttr { + let now = std::time::SystemTime::now(); + FileAttr::RegularFile { + common: make_common_file_attr(ino, 0o644, now, now, (0, 0), 4096), + size: 100, + blocks: 1, + } + } + + #[test] + fn needs_resolve_stub_returns_true() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 0, + path: "stub".into(), + attr: None, + children: None, + }; + assert!(icb.needs_resolve()); + } + + #[test] + fn needs_resolve_file_with_attr_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "file.txt".into(), + attr: Some(dummy_file_attr(2)), + children: None, + }; + assert!(!icb.needs_resolve()); + } + + #[test] + fn 
needs_resolve_dir_without_children_returns_true() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "dir".into(), + attr: Some(dummy_dir_attr(3)), + children: None, + }; + assert!(icb.needs_resolve()); + } + + #[test] + fn needs_resolve_dir_with_children_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "dir".into(), + attr: Some(dummy_dir_attr(3)), + children: Some(vec![ + ("README.md".to_owned(), DirEntryType::RegularFile), + ]), + }; + assert!(!icb.needs_resolve()); + } + + #[test] + fn needs_resolve_dir_with_empty_children_returns_false() { + let icb = InodeControlBlock { + parent: Some(1), + rc: 1, + path: "empty-dir".into(), + attr: Some(dummy_dir_attr(4)), + children: Some(vec![]), + }; + assert!(!icb.needs_resolve()); + } +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --quiet -p git-fs --lib mescloud::icache::tests` +Expected: FAIL — `InodeControlBlock` doesn't have `children` field yet. + +**Step 3: Add `children` field and update `needs_resolve()`** + +In `src/fs/mescloud/icache.rs`, add the import for `DirEntryType`: + +```rust +use crate::fs::r#trait::{CommonFileAttr, DirEntryType, FileAttr, FilesystemStats, Inode, Permissions}; +``` + +Update the struct: + +```rust +pub struct InodeControlBlock { + pub parent: Option, + pub rc: u64, + pub path: std::path::PathBuf, + /// Cached file attributes from the last lookup. + pub attr: Option, + /// Cached directory children from the resolver (directories only). + pub children: Option>, +} +``` + +Update `new_root`: + +```rust +fn new_root(path: std::path::PathBuf) -> Self { + Self { + rc: 1, + parent: None, + path, + attr: None, + children: None, + } +} +``` + +Update `needs_resolve`: + +```rust +fn needs_resolve(&self) -> bool { + match self.attr { + None => true, + Some(FileAttr::Directory { .. 
}) => self.children.is_none(), + Some(_) => false, + } +} +``` + +**Step 4: Fix all `InodeControlBlock` construction sites** + +Every place that creates an `InodeControlBlock` literal must add `children: None` (or `children: Some(...)` where appropriate). These are all in `src/fs/mescloud/`: + +1. **`src/fs/mescloud/icache.rs:231`** — `ensure_child_ino` stub: + ```rust + InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent), + attr: None, + children: None, + } + ``` + +2. **`src/fs/mescloud/mod.rs:62`** — `MesaResolver::resolve` stub fallback: + ```rust + let stub = stub.unwrap_or_else(|| InodeControlBlock { + parent: None, + path: "/".into(), + rc: 0, + attr: None, + children: None, + }); + ``` + +3. **`src/fs/mescloud/mod.rs:74`** — `MesaResolver::resolve` return (directories — set `children: Some(vec![])`): + ```rust + Ok(InodeControlBlock { + attr: Some(attr), + children: Some(vec![]), + ..stub + }) + ``` + +4. **`src/fs/mescloud/mod.rs:227`** — `MesaFS::ensure_org_inode` insert: + ```rust + InodeControlBlock { + rc: 0, + path: org_name.as_str().into(), + parent: Some(Self::ROOT_NODE_INO), + attr: None, + children: None, + } + ``` + +5. **`src/fs/mescloud/mod.rs:280`** — `MesaFS::translate_org_ino_to_mesa` factory: + ```rust + InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent_mesa_ino), + attr: None, + children: None, + } + ``` + +6. **`src/fs/mescloud/org.rs:51`** — `OrgResolver::resolve` stub fallback: + ```rust + let stub = stub.unwrap_or_else(|| InodeControlBlock { + parent: None, + path: "/".into(), + rc: 0, + attr: None, + children: None, + }); + ``` + +7. **`src/fs/mescloud/org.rs:63`** — `OrgResolver::resolve` return (directories — set `children: Some(vec![])`): + ```rust + Ok(InodeControlBlock { + attr: Some(attr), + children: Some(vec![]), + ..stub + }) + ``` + +8. 
**`src/fs/mescloud/org.rs:179-188`** — `OrgFs::ensure_owner_inode` insert: + ```rust + InodeControlBlock { + rc: 0, + path: owner.into(), + parent: Some(Self::ROOT_INO), + attr: None, + children: None, + } + ``` + +9. **`src/fs/mescloud/org.rs:325-334`** — `OrgFs::ensure_repo_inode` insert: + ```rust + InodeControlBlock { + rc: 0, + path: display_name.into(), + parent: Some(parent_ino), + attr: None, + children: None, + } + ``` + +10. **`src/fs/mescloud/org.rs:411`** — `OrgFs::translate_repo_ino_to_org` factory: + ```rust + InodeControlBlock { + rc: 0, + path: name.into(), + parent: Some(parent_org_ino), + attr: None, + children: None, + } + ``` + +11. **`src/fs/mescloud/repo.rs:99`** — `RepoResolver::resolve` return (will be updated in Task 2 — for now, add `children: None`): + ```rust + Ok(InodeControlBlock { + parent: stub.parent, + path: stub.path, + rc: stub.rc, + attr: Some(attr), + children: None, + }) + ``` + +**Step 5: Run tests to verify they pass** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: All tests PASS, including the new `needs_resolve` tests. 
+ +**Step 6: Commit** + +```bash +git add src/fs/mescloud/icache.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs +git commit -m "feat: add children field to InodeControlBlock for directory caching" +``` + +--- + +### Task 2: Populate `children` in `RepoResolver` + +**Files:** +- Modify: `src/fs/mescloud/repo.rs:37-107` (the `RepoResolver::resolve` impl) + +**Step 1: Update `RepoResolver::resolve` to populate `children` for directories** + +In `src/fs/mescloud/repo.rs`, inside the `resolve` async block, after building `attr`, extract children from `Content::Dir`: + +```rust +async move { + let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); + let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; + + let content = client + .org(&org_name) + .repos() + .at(&repo_name) + .content() + .get(Some(ref_.as_str()), file_path.as_deref(), None) + .await + .map_err(MesaApiError::from)?; + + let now = SystemTime::now(); + let attr = match &content { + Content::File(f) => { + let size = f.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + ino, 0o644, now, now, fs_owner, block_size, + ), + size, + blocks: mescloud_icache::blocks_of_size(block_size, size), + } + } + Content::Symlink(s) => { + let size = s.size.to_u64().unwrap_or(0); + FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + ino, 0o644, now, now, fs_owner, block_size, + ), + size, + blocks: mescloud_icache::blocks_of_size(block_size, size), + } + } + Content::Dir(_) => FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + ino, 0o755, now, now, fs_owner, block_size, + ), + }, + }; + + let children = match content { + Content::Dir(d) => Some( + d.entries + .into_iter() + .filter_map(|e| { + let (name, kind) = match e { + MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), + // TODO(MES-712): return DirEntryType::Symlink 
once readlink is wired up. + MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), + MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), + }; + Some((name, kind)) + }) + .collect(), + ), + Content::File(_) | Content::Symlink(_) => None, + }; + + Ok(InodeControlBlock { + parent: stub.parent, + path: stub.path, + rc: stub.rc, + attr: Some(attr), + children, + }) +} +``` + +Note: The `match &content` (borrow) for `attr` must come before `match content` (move) for `children`. The existing code already borrows for `attr`, so this change only adds a second `match` that consumes `content`. + +**Step 2: Run tests to verify they pass** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS. This is a purely additive change — the resolver now populates `children` but nothing reads it yet. + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "feat: populate children in RepoResolver for directory inodes" +``` + +--- + +### Task 3: Add `From for ReadDirError` conversion + +**Files:** +- Modify: `src/fs/mescloud/common.rs:125-149` + +**Step 1: Write a failing test for the conversion** + +Add tests at the bottom of `src/fs/mescloud/common.rs`: + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn lookup_inode_not_found_converts_to_readdir_inode_not_found() { + let err: ReadDirError = LookupError::InodeNotFound.into(); + assert!(matches!(err, ReadDirError::InodeNotFound)); + } + + #[test] + fn lookup_file_does_not_exist_converts_to_readdir_inode_not_found() { + let err: ReadDirError = LookupError::FileDoesNotExist.into(); + assert!(matches!(err, ReadDirError::InodeNotFound)); + } + + #[test] + fn lookup_remote_error_converts_to_readdir_remote_error() { + let api_err = MesaApiError::Response { + status: 500, + body: "test".to_owned(), + }; + let err: ReadDirError = LookupError::RemoteMesaError(api_err).into(); + assert!(matches!(err, 
ReadDirError::RemoteMesaError(_)));
+    }
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cargo test --quiet -p git-fs --lib mescloud::common::tests`
+Expected: FAIL — `From<LookupError> for ReadDirError` not implemented.
+
+**Step 3: Add the `From` impl**
+
+In `src/fs/mescloud/common.rs`, add after the `ReadDirError` definition (before `impl From<ReadDirError> for i32`):
+
+```rust
+impl From<LookupError> for ReadDirError {
+    fn from(e: LookupError) -> Self {
+        match e {
+            LookupError::RemoteMesaError(api) => Self::RemoteMesaError(api),
+            LookupError::InodeNotFound | LookupError::FileDoesNotExist => Self::InodeNotFound,
+        }
+    }
+}
+```
+
+**Step 4: Run tests to verify they pass**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+Expected: PASS.
+
+**Step 5: Commit**
+
+```bash
+git add src/fs/mescloud/common.rs
+git commit -m "feat: add From<LookupError> for ReadDirError conversion"
+```
+
+---
+
+### Task 4: Rewrite `RepoFs::readdir` to use icache
+
+**Files:**
+- Modify: `src/fs/mescloud/repo.rs:260-350` (the `readdir` impl)
+
+**Step 1: Replace the direct API call with `get_or_resolve`**
+
+Replace the entire `readdir` method body in `src/fs/mescloud/repo.rs`:
+
+```rust
+#[instrument(skip(self), fields(repo = %self.repo_name))]
+async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> {
+    debug_assert!(
+        self.icache.contains(ino),
+        "readdir: inode {ino} not in inode table"
+    );
+    debug_assert!(
+        matches!(
+            self.icache.get_attr(ino).await,
+            Some(FileAttr::Directory { .. }) | None
+        ),
+        "readdir: inode {ino} has non-directory cached attr"
+    );
+
+    let children = self
+        .icache
+        .get_or_resolve(ino, |icb| icb.children.clone())
+        .await?
+ .ok_or(ReadDirError::NotADirectory)?; + + trace!(ino, count = children.len(), "readdir: resolved directory listing from icache"); + + let mut entries = Vec::with_capacity(children.len()); + for (name, kind) in &children { + let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; + let now = SystemTime::now(); + let attr = match kind { + DirEntryType::Directory => FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + child_ino, + 0o755, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), + }, + DirEntryType::RegularFile + | DirEntryType::Symlink + | DirEntryType::CharDevice + | DirEntryType::BlockDevice + | DirEntryType::NamedPipe + | DirEntryType::Socket => FileAttr::RegularFile { + common: mescloud_icache::make_common_file_attr( + child_ino, + 0o644, + now, + now, + self.icache.fs_owner(), + self.icache.block_size(), + ), + size: 0, + blocks: 0, + }, + }; + self.icache.cache_attr(child_ino, attr).await; + entries.push(DirEntry { + ino: child_ino, + name: name.clone().into(), + kind: *kind, + }); + } + + self.readdir_buf = entries; + Ok(&self.readdir_buf) +} +``` + +Key differences from original: +- No `self.path_of_inode(ino)` call +- No `self.client.org(...).repos().at(...).content().get(...)` call +- Instead: `self.icache.get_or_resolve(ino, |icb| icb.children.clone())` reads cached children (resolver fetches on miss) +- Error type uses `?` with `From for ReadDirError` (from Task 3) +- The child inode allocation and attr caching loop is identical + +**Step 2: Run full verification** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS. Clippy may warn about unused imports — address in next step. + +**Step 3: Clean up unused imports if any** + +After the change, check whether any imports in `repo.rs` are now unused. 
The `readdir` method no longer uses: +- `self.client` in readdir (still used by `read`) +- `self.path_of_inode` in readdir (still used by `read`) + +All imports should still be needed since `read` uses `Content` and the resolver uses `MesaDirEntry`. Verify with clippy output. + +**Step 4: Run full verification again** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` +Expected: PASS with no warnings. + +**Step 5: Commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "feat: readdir reads from icache instead of querying API directly" +``` diff --git a/docs/plans/2026-02-10-resolver-as-data-provider.md b/docs/plans/2026-02-10-resolver-as-data-provider.md new file mode 100644 index 0000000..5d45571 --- /dev/null +++ b/docs/plans/2026-02-10-resolver-as-data-provider.md @@ -0,0 +1,334 @@ +# Consolidate Attr Creation Into Resolvers + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Eliminate manual `FileAttr` construction in `ensure_*` methods by delegating attr creation to the resolver via `get_or_resolve`, making the resolver the single source of truth for attribute data. + +**Architecture:** The async icache has a resolver-based state machine (`InFlight`/`Available`) where `get_or_resolve` is the canonical path for populating ICBs. Currently, `ensure_owner_inode`, `ensure_repo_inode`, and `ensure_org_inode` bypass this by manually constructing `FileAttr::Directory` and calling `cache_attr`. This refactoring makes them insert stubs (attr=None) and then call `get_or_resolve`, which triggers the resolver to produce the attr. The resolvers (`OrgResolver`, `MesaResolver`) already contain this exact logic. 
+ +**Tech Stack:** Rust, tokio, scc::HashMap + +--- + +## Context + +### The Problem + +Three `ensure_*` methods duplicate attr construction that their resolvers already handle: + +| Method | File | Resolver | +|---|---|---| +| `ensure_owner_inode` | `src/fs/mescloud/org.rs:152` | `OrgResolver` | +| `ensure_repo_inode` | `src/fs/mescloud/org.rs:276` | `OrgResolver` | +| `ensure_org_inode` | `src/fs/mescloud/mod.rs:181` | `MesaResolver` | + +Each method manually constructs `FileAttr::Directory { common: make_common_file_attr(...) }` in **two places** (existing-entry-missing-attr fallback + new-entry creation), then calls `cache_attr`. The resolvers do the exact same construction. This is ~6 duplicated attr-construction sites. + +### The Fix + +Replace manual construction with the resolver flow: +1. `insert_icb(stub)` — creates entry with `attr: None` +2. `get_or_resolve(ino, |icb| icb.attr)` — resolver populates the attr + +The "existing entry with missing attr" defensive fallback also becomes unnecessary since `get_or_resolve` handles stubs (where `needs_resolve()` returns true) by calling the resolver. + +### What Stays The Same + +- `insert_icb` remains for stub creation (it's the correct way to seed the inode table with parent/path before resolution) +- `cache_attr` remains for cross-layer attr propagation (MesaFS ← OrgFs ← RepoFs via bridges) — this is NOT resolver data +- `ensure_child_ino` in `MescloudICache` stays unchanged (it creates stubs for the repo layer, resolved later by `RepoResolver`) +- `entry_or_insert_icb` stays unchanged (used by `translate_*` methods for bridge-level ICB mirroring) + +--- + +### Task 1: Change `OrgResolver::Error` to `Infallible` + +`OrgResolver::resolve` always returns `Ok(...)` — it synthesizes directory attrs from local data with no I/O. The error type `LookupError` is misleading. Changing to `Infallible` makes the guarantee explicit and aligns with `MesaResolver` which already uses `Infallible`. 
+
+**Files:**
+- Modify: `src/fs/mescloud/org.rs:35-69` (OrgResolver impl)
+
+**Step 1: Update the OrgResolver impl**
+
+Change the error type and return type:
+
+```rust
+impl IcbResolver for OrgResolver {
+    type Icb = InodeControlBlock;
+    type Error = std::convert::Infallible;
+
+    fn resolve(
+        &self,
+        ino: Inode,
+        stub: Option<Self::Icb>,
+        _cache: &AsyncICache<Self>,
+    ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send
+    where
+        Self: Sized,
+    {
+        // ... body unchanged ...
+    }
+}
+```
+
+**Step 2: Verify**
+
+Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet`
+
+**Step 3: Commit**
+
+```bash
+git add src/fs/mescloud/org.rs
+git commit -m "refactor: change OrgResolver::Error to Infallible"
+```
+
+---
+
+### Task 2: Refactor `ensure_owner_inode` to use resolver
+
+**Files:**
+- Modify: `src/fs/mescloud/org.rs:152-202`
+
+**Step 1: Replace the method body**
+
+The new structure: find-or-create the inode, then resolve through the canonical path.
+
+```rust
+async fn ensure_owner_inode(&mut self, owner: &str) -> (Inode, FileAttr) {
+    let existing_ino = self
+        .owner_inodes
+        .iter()
+        .find_map(|(&ino, existing_owner)| (existing_owner == owner).then_some(ino));
+
+    let ino = if let Some(ino) = existing_ino {
+        ino
+    } else {
+        let ino = self.icache.allocate_inode();
+        self.icache
+            .insert_icb(
+                ino,
+                InodeControlBlock {
+                    rc: 0,
+                    path: owner.into(),
+                    parent: Some(Self::ROOT_INO),
+                    attr: None,
+                },
+            )
+            .await;
+        self.owner_inodes.insert(ino, owner.to_owned());
+        ino
+    };
+
+    let attr = self
+        .icache
+        .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr"))
+        .await
+        .unwrap(); // OrgResolver is infallible
+    (ino, attr)
+}
+```
+
+Key changes:
+- Removed all manual `FileAttr::Directory` construction (was in 2 places)
+- Removed the "attr missing → rebuild" defensive fallback — `get_or_resolve` handles this via the resolver
+- Removed `use std::time::SystemTime` usage in this method (resolver handles it)
+- Single 
`get_or_resolve` call covers both "existing with attr" (fast path) and "existing without attr" / "newly created stub" (resolver path) + +**Step 2: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/org.rs +git commit -m "refactor: ensure_owner_inode delegates attr creation to OrgResolver" +``` + +--- + +### Task 3: Refactor `ensure_repo_inode` to use resolver + +**Files:** +- Modify: `src/fs/mescloud/org.rs:276-361` + +**Step 1: Replace the method body** + +Same pattern. Side-effects (RepoFs creation, bridge setup) happen in the "allocate new" branch before resolution. + +```rust +async fn ensure_repo_inode( + &mut self, + repo_name: &str, + display_name: &str, + default_branch: &str, + parent_ino: Inode, +) -> (Inode, FileAttr) { + let repos = &self.repos; + let existing_ino = self + .repo_inodes + .iter() + .find_map(|(&ino, &idx)| (repos[idx].repo.repo_name() == repo_name).then_some(ino)); + + let ino = if let Some(ino) = existing_ino { + let rc = self.icache.get_icb(ino, |icb| icb.rc).await.unwrap_or(0); + trace!(ino, repo = repo_name, rc, "ensure_repo_inode: reusing"); + ino + } else { + let ino = self.icache.allocate_inode(); + trace!(ino, repo = repo_name, "ensure_repo_inode: allocated new inode"); + + self.icache + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: display_name.into(), + parent: Some(parent_ino), + attr: None, + }, + ) + .await; + + let repo = RepoFs::new( + self.client.clone(), + self.name.clone(), + repo_name.to_owned(), + default_branch.to_owned(), + self.icache.fs_owner(), + ); + + let mut bridge = HashMapBridge::new(); + bridge.insert_inode(ino, RepoFs::ROOT_INO); + + let idx = self.repos.len(); + self.repos.push(RepoSlot { repo, bridge }); + self.repo_inodes.insert(ino, idx); + ino + }; + + let attr = self + .icache + .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr")) + .await + 
.unwrap(); // OrgResolver is infallible + (ino, attr) +} +``` + +Key changes: +- Removed all manual `FileAttr::Directory` construction (was in 2 places) +- Removed the "attr missing → rebuilding" warn/fallback path +- Side-effects (RepoFs, bridge, repo_inodes) preserved in the else branch + +**Step 2: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/org.rs +git commit -m "refactor: ensure_repo_inode delegates attr creation to OrgResolver" +``` + +--- + +### Task 4: Refactor `ensure_org_inode` to use resolver + +**Files:** +- Modify: `src/fs/mescloud/mod.rs:181-253` + +**Step 1: Replace the method body** + +```rust +async fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { + let existing_ino = self + .org_inodes + .iter() + .find(|&(_, &idx)| idx == org_idx) + .map(|(&ino, _)| ino); + + let ino = if let Some(ino) = existing_ino { + let rc = self + .icache + .get_icb(ino, |icb| icb.rc) + .await + .unwrap_or(0); + trace!(ino, org_idx, rc, "ensure_org_inode: reusing existing inode"); + ino + } else { + let org_name = self.org_slots[org_idx].org.name().to_owned(); + let ino = self.icache.allocate_inode(); + trace!(ino, org_idx, org = %org_name, "ensure_org_inode: allocated new inode"); + + self.icache + .insert_icb( + ino, + InodeControlBlock { + rc: 0, + path: org_name.as_str().into(), + parent: Some(Self::ROOT_NODE_INO), + attr: None, + }, + ) + .await; + + self.org_inodes.insert(ino, org_idx); + self.org_slots[org_idx] + .bridge + .insert_inode(ino, OrgFs::ROOT_INO); + ino + }; + + let attr = self + .icache + .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr")) + .await + .unwrap(); // MesaResolver is infallible + (ino, attr) +} +``` + +Key changes: +- Removed all manual `FileAttr::Directory` construction (was in 2 places) +- Removed the "attr missing → rebuilding" warn/fallback path +- Side-effects 
(org_inodes, bridge seeding) preserved + +**Step 2: Remove unused `SystemTime` import if no longer needed** + +Check if `SystemTime` is still used in `mod.rs`. It is used in `MesaResolver::resolve` and `MescloudICache::new`, so it stays. + +**Step 3: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +**Step 4: Commit** + +```bash +git add src/fs/mescloud/mod.rs +git commit -m "refactor: ensure_org_inode delegates attr creation to MesaResolver" +``` + +--- + +### Task 5: Remove unused imports + +**Files:** +- Modify: `src/fs/mescloud/org.rs` (check for unused `SystemTime`, `mescloud_icache` usage) +- Modify: `src/fs/mescloud/mod.rs` (same check) + +**Step 1: Check and remove unused imports** + +After the refactoring, check whether `SystemTime` and `mescloud_icache::make_common_file_attr` are still used in each file outside of the resolver. The resolvers still use them, so they likely stay. But verify with clippy. + +**Step 2: Verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +**Step 3: Commit (if changes needed)** + +```bash +git add src/fs/mescloud/org.rs src/fs/mescloud/mod.rs +git commit -m "chore: remove unused imports after resolver refactoring" +``` diff --git a/docs/plans/2026-02-10-split-async-contains.md b/docs/plans/2026-02-10-split-async-contains.md new file mode 100644 index 0000000..6ce0eaa --- /dev/null +++ b/docs/plans/2026-02-10-split-async-contains.md @@ -0,0 +1,287 @@ +# Split AsyncICache::contains into Sync Methods + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Replace the async `contains()` method with two non-async variants to eliminate debug/release control flow divergence caused by awaiting InFlight entries in debug_asserts. 
+ +**Architecture:** The async `contains()` currently awaits InFlight entries (potentially blocking on network I/O), which changes control flow in debug vs release builds. We split it into `contains()` (sync, key exists in any state) and `contains_resolved()` (sync, key is Available). The internal `wait_for_available()` remains for other methods that genuinely need to await. + +**Tech Stack:** Rust, scc::HashMap (has `read_sync` and `contains_sync`), tokio + +--- + +### Task 1: Add sync `contains` and `contains_resolved` to `AsyncICache` + +**Files:** +- Modify: `src/fs/icache/async_cache.rs:119-122` + +**Step 1: Add the two new methods** + +Add these methods to the `impl AsyncICache` block, replacing the existing `pub async fn contains`: + +```rust + /// Check whether `ino` has an entry in the table (either `InFlight` or `Available`). + /// + /// This is a non-blocking, synchronous check. It does **not** wait for + /// `InFlight` entries to resolve. + pub fn contains(&self, ino: Inode) -> bool { + self.inode_table.contains_sync(&ino) + } + + /// Check whether `ino` is fully resolved (`Available`). + /// + /// Returns `false` if the entry is missing **or** still `InFlight`. + /// This is a non-blocking, synchronous check. + pub fn contains_resolved(&self, ino: Inode) -> bool { + self.inode_table + .read_sync(&ino, |_, s| matches!(s, IcbState::Available(_))) + .unwrap_or(false) + } +``` + +**Step 2: Run verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +This will fail because tests still call `cache.contains(42).await` on the now non-async method. That's expected — we fix the tests in Task 4. 
+ +**Step 3: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "feat: add sync contains() and contains_resolved() to AsyncICache" +``` + +--- + +### Task 2: Update `MescloudICache` delegation + +**Files:** +- Modify: `src/fs/mescloud/icache.rs:101-105` + +**Step 1: Replace the async delegation with sync delegations** + +Replace: +```rust + pub async fn contains(&self, ino: Inode) -> bool { + self.inner.contains(ino).await + } +``` + +With: +```rust + pub fn contains(&self, ino: Inode) -> bool { + self.inner.contains(ino) + } + + pub fn contains_resolved(&self, ino: Inode) -> bool { + self.inner.contains_resolved(ino) + } +``` + +**Step 2: Run verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +This will still fail from tests — expected. + +**Step 3: Commit** + +```bash +git add src/fs/mescloud/icache.rs +git commit -m "feat: add sync contains/contains_resolved to MescloudICache" +``` + +--- + +### Task 3: Replace `debug_assert!` usages in `repo.rs` + +**Files:** +- Modify: `src/fs/mescloud/repo.rs` (lines 228-231, 260-263, 352, 450-453) + +**Step 1: Update the three debug_asserts** + +In `lookup` (line 228-231), replace: +```rust + debug_assert!( + self.icache.contains(parent).await, + "lookup: parent inode {parent} not in inode table" + ); +``` +With: +```rust + debug_assert!( + self.icache.contains(parent), + "lookup: parent inode {parent} not in inode table" + ); +``` + +In `readdir` (line 260-263), replace: +```rust + debug_assert!( + self.icache.contains(ino).await, + "readdir: inode {ino} not in inode table" + ); +``` +With: +```rust + debug_assert!( + self.icache.contains(ino), + "readdir: inode {ino} not in inode table" + ); +``` + +In `forget` (line 450-453), replace: +```rust + debug_assert!( + self.icache.contains(ino).await, + "forget: inode {ino} not in inode table" + ); +``` +With: +```rust + debug_assert!( + self.icache.contains(ino), + "forget: inode {ino} not 
in inode table" + ); +``` + +**Step 2: Update the `open` guard** + +In `open` (line 352), replace: +```rust + if !self.icache.contains(ino).await { +``` +With: +```rust + if !self.icache.contains(ino) { +``` + +The `open` method doesn't read ICB data — it only allocates a file handle. Subsequent `read` calls go through `path_of_inode` → `get_icb` which properly awaits InFlight entries. FUSE guarantees the inode was previously looked up, so it must be in the table (InFlight or Available). + +**Step 3: Run verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +Still expect test failures from `async_cache.rs` tests. + +**Step 4: Commit** + +```bash +git add src/fs/mescloud/repo.rs +git commit -m "fix: use sync contains() in debug_asserts and open guard" +``` + +--- + +### Task 4: Update `async_cache.rs` tests + +**Files:** +- Modify: `src/fs/icache/async_cache.rs` (test module, lines 460+) + +**Step 1: Update tests that used `contains(...).await`** + +The following test assertions need their `.await` removed since `contains()` is now sync: + +`contains_returns_true_for_root` (line 463): +```rust +assert!(cache.contains(1), "root should exist"); +``` + +`contains_returns_false_for_missing` (line 469): +```rust +assert!(!cache.contains(999), "missing inode should not exist"); +``` + +`contains_after_resolver_completes` (line 493): +```rust +assert!(cache.contains(42), "should be true after resolve"); +``` + +`insert_icb_adds_entry` (line 564): +```rust +assert!(cache.contains(42), "inserted entry should exist"); +``` + +`forget_evicts_when_rc_drops_to_zero` (line 700): +```rust +assert!(!cache.contains(42), "evicted entry should be gone"); +``` + +`wait_does_not_miss_signal_on_immediate_complete` (lines 795-801): This test exercises the awaiting behavior of the old `contains`. It should now test `contains_resolved` or be refactored. 
Replace: +```rust + let result = + tokio::time::timeout(std::time::Duration::from_millis(100), cache.contains(42)).await; + assert_eq!( + result, + Ok(true), + "should not hang on already-completed entry" + ); +``` +With: +```rust + assert!(cache.contains(42), "entry should exist in table"); + assert!( + cache.contains_resolved(42), + "should be resolved after insert_icb overwrote InFlight" + ); +``` + +`get_or_resolve_resolves_missing` (line 840): +```rust +assert!(cache.contains(42)); +``` + +`get_or_resolve_propagates_error` (line 853): +```rust +assert!(!cache.contains(42)); +``` + +**Step 2: Add dedicated tests for `contains_resolved`** + +Add after the existing `contains_returns_false_for_missing` test: + +```rust + #[tokio::test] + async fn contains_resolved_returns_true_for_root() { + let cache = test_cache(); + assert!(cache.contains_resolved(1), "root should be resolved"); + } + + #[tokio::test] + async fn contains_resolved_returns_false_for_missing() { + let cache = test_cache(); + assert!( + !cache.contains_resolved(999), + "missing inode should not be resolved" + ); + } + + #[tokio::test] + async fn contains_resolved_returns_false_for_inflight() { + let cache = test_cache(); + let (_tx, rx) = watch::channel(()); + cache + .inode_table + .upsert_async(42, IcbState::InFlight(rx)) + .await; + assert!(cache.contains(42), "InFlight entry should exist"); + assert!( + !cache.contains_resolved(42), + "InFlight entry should not be resolved" + ); + } +``` + +**Step 3: Run verify** + +Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` + +Expected: ALL PASS + +**Step 4: Commit** + +```bash +git add src/fs/icache/async_cache.rs +git commit -m "test: update tests for sync contains/contains_resolved" +``` diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index be33257..41a43ad 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -94,27 +94,26 @@ impl AsyncICache { 
/// `false` if the entry does not exist. #[instrument(name = "AsyncICache::wait_for_available", skip(self))] async fn wait_for_available(&self, ino: Inode) -> bool { - let rx = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::InFlight(rx) => Some(rx.clone()), - IcbState::Available(_) => None, - }) - .await; + loop { + let rx = self + .inode_table + .read_async(&ino, |_, s| match s { + IcbState::InFlight(rx) => Some(rx.clone()), + IcbState::Available(_) => None, + }) + .await; - match rx { - None => false, // key missing - Some(None) => true, // Available - Some(Some(mut rx)) => { - // Wait for the resolver to complete (or fail/drop sender). - // changed() returns Err(RecvError) when sender is dropped, - // which is fine — it means resolution finished. - let _ = rx.changed().await; - // Re-check: entry is now Available or was removed on error. - self.inode_table - .read_async(&ino, |_, s| matches!(s, IcbState::Available(_))) - .await - .unwrap_or(false) + match rx { + None => return false, // key missing + Some(None) => return true, // Available + Some(Some(mut rx)) => { + // Wait for the resolver to complete (or fail/drop sender). + // changed() returns Err(RecvError) when sender is dropped, + // which is fine — it means resolution finished. + let _ = rx.changed().await; + // Loop back — the entry might be InFlight again if another + // resolution cycle started between our wakeup and re-read. + } } } } @@ -130,33 +129,58 @@ impl AsyncICache { /// Read an ICB via closure. **Awaits** if `InFlight`. /// Returns `None` if `ino` doesn't exist. #[instrument(name = "AsyncICache::get_icb", skip(self, f))] - pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&R::Icb) -> T) -> Option { - if !self.wait_for_available(ino).await { - return None; + // `Sync` is required because `f` is held across `.await` points in the + // loop body; for the resulting future to be `Send`, the captured closure + // must be `Sync` (clippy::future_not_send). 
+ pub async fn get_icb( + &self, + ino: Inode, + f: impl Fn(&R::Icb) -> T + Send + Sync, + ) -> Option { + loop { + if !self.wait_for_available(ino).await { + return None; + } + let result = self + .inode_table + .read_async(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .await; + match result { + Some(Some(val)) => return Some(val), + Some(None) => {} // was InFlight, retry + None => return None, // key missing + } } - self.inode_table - .read_async(&ino, |_, state| match state { - IcbState::Available(icb) => Some(f(icb)), - IcbState::InFlight(_) => None, - }) - .await - .flatten() } /// Mutate an ICB via closure. **Awaits** if `InFlight`. /// Returns `None` if `ino` doesn't exist. #[instrument(name = "AsyncICache::get_icb_mut", skip(self, f))] - pub async fn get_icb_mut(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { - if !self.wait_for_available(ino).await { - return None; + pub async fn get_icb_mut( + &self, + ino: Inode, + mut f: impl FnMut(&mut R::Icb) -> T + Send, + ) -> Option { + loop { + if !self.wait_for_available(ino).await { + return None; + } + let result = self + .inode_table + .update_async(&ino, |_, state| match state { + IcbState::Available(icb) => Some(f(icb)), + IcbState::InFlight(_) => None, + }) + .await; + match result { + Some(Some(val)) => return Some(val), + Some(None) => {} // was InFlight, retry + None => return None, // key missing + } } - self.inode_table - .update_async(&ino, |_, state| match state { - IcbState::Available(icb) => Some(f(icb)), - IcbState::InFlight(_) => None, - }) - .await - .flatten() } /// Insert an ICB directly as `Available`. If the entry is currently @@ -1257,6 +1281,68 @@ mod tests { } } + /// Regression test: `get_icb` must survive the entry cycling back to + /// `InFlight` between when `wait_for_available` returns and when + /// `read_async` runs. 
The loop in `get_icb` should retry and eventually + /// return the final resolved value. + #[tokio::test] + async fn wait_for_available_retries_on_re_inflight() { + let cache = Arc::new(test_cache()); + let ino: Inode = 42; + + // Phase 1: insert an InFlight entry. + let (tx1, rx1) = watch::channel(()); + cache + .inode_table + .upsert_async(ino, IcbState::InFlight(rx1)) + .await; + + // Spawn get_icb — it will wait for InFlight to resolve. + let cache_get = Arc::clone(&cache); + let get_handle = + tokio::spawn(async move { cache_get.get_icb(ino, |icb| icb.path.clone()).await }); + + // Give get_icb time to start waiting on the watch channel. + tokio::task::yield_now().await; + + // Phase 1 complete: transition to Available briefly, then immediately + // back to InFlight (simulates get_or_resolve finding a stub and + // re-entering InFlight for a second resolution). + let (tx2, rx2) = watch::channel(()); + cache + .inode_table + .upsert_async(ino, IcbState::InFlight(rx2)) + .await; + // Signal phase-1 watchers so get_icb wakes up; it will re-read the + // entry and find InFlight again, then loop back to wait. + drop(tx1); + + // Give get_icb time to re-enter the wait loop. + tokio::task::yield_now().await; + + // Phase 2 complete: write the final resolved value. + cache + .inode_table + .upsert_async( + ino, + IcbState::Available(TestIcb { + rc: 1, + path: "/fully-resolved".into(), + resolved: true, + }), + ) + .await; + drop(tx2); + + // get_icb should return the final resolved value (not None). + let result = get_handle.await.expect("get_icb task panicked"); + assert_eq!( + result, + Some(PathBuf::from("/fully-resolved")), + "get_icb must survive re-InFlight and return the final resolved value" + ); + } + /// Regression test: an entry evicted by `forget` during an in-progress /// `get_or_resolve` must NOT be resurrected when resolution completes. 
#[tokio::test] diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index bae1b5e..c3a861f 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -115,7 +115,8 @@ impl> MescloudICache { pub async fn get_icb( &self, ino: Inode, - f: impl FnOnce(&InodeControlBlock) -> T, + // `Sync` required: see comment on `AsyncICache::get_icb`. + f: impl Fn(&InodeControlBlock) -> T + Send + Sync, ) -> Option { self.inner.get_icb(ino, f).await } From 695d0ba867eef18832f3f6528aaea6b592c40ea0 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:24:24 -0800 Subject: [PATCH 45/57] fix(repo): return error instead of panicking on missing stub in RepoResolver Replace unreachable!() with proper error handling in RepoResolver::resolve when stub is None. Now returns LookupError::InodeNotFound instead of panicking. --- src/fs/mescloud/repo.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index b577921..f7a36bd 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -55,7 +55,7 @@ impl IcbResolver for RepoResolver { let block_size = self.block_size; async move { - let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); + let stub = stub.ok_or(LookupError::InodeNotFound)?; let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; // Non-root inodes must have a resolvable path. 
From 2f475a2e7680876ffdc71ddddcd12f1beaeb552a Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:27:35 -0800 Subject: [PATCH 46/57] fix(mescloud): return ENOENT for unclassifiable inodes instead of falling back to Root --- src/fs/mescloud/mod.rs | 17 +++++++++-------- src/fs/mescloud/org.rs | 19 ++++++++++--------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 88356b5..34b170a 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -138,18 +138,17 @@ impl MesaFS { } /// Classify an inode by its role. - fn inode_role(&self, ino: Inode) -> InodeRole { + fn inode_role(&self, ino: Inode) -> Option { if ino == Self::ROOT_NODE_INO { - return InodeRole::Root; + return Some(InodeRole::Root); } if self.composite.child_inodes.contains_key(&ino) { - return InodeRole::OrgOwned; + return Some(InodeRole::OrgOwned); } if self.composite.slot_for_inode(ino).is_some() { - return InodeRole::OrgOwned; + return Some(InodeRole::OrgOwned); } - debug_assert!(false, "inode {ino} not found in any org slot"); - InodeRole::Root + None } /// Ensure a mesa-level inode exists for the org at `org_idx`. 
@@ -252,7 +251,8 @@ impl Fs for MesaFS { #[instrument(name = "MesaFS::lookup", skip(self))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - match self.inode_role(parent) { + let role = self.inode_role(parent).ok_or(LookupError::InodeNotFound)?; + match role { InodeRole::Root => { let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; let org_idx = self @@ -288,7 +288,8 @@ impl Fs for MesaFS { #[instrument(name = "MesaFS::readdir", skip(self))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - match self.inode_role(ino) { + let role = self.inode_role(ino).ok_or(ReadDirError::InodeNotFound)?; + match role { InodeRole::Root => { let org_info: Vec<(usize, String)> = self .composite diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index bb01a2f..cb82241 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -197,21 +197,20 @@ impl OrgFs { } /// Classify an inode by its role. - fn inode_role(&self, ino: Inode) -> InodeRole { + fn inode_role(&self, ino: Inode) -> Option { if ino == Self::ROOT_INO { - return InodeRole::OrgRoot; + return Some(InodeRole::OrgRoot); } if self.owner_inodes.contains_key(&ino) { - return InodeRole::OwnerDir; + return Some(InodeRole::OwnerDir); } if self.composite.child_inodes.contains_key(&ino) { - return InodeRole::RepoOwned; + return Some(InodeRole::RepoOwned); } if self.composite.slot_for_inode(ino).is_some() { - return InodeRole::RepoOwned; + return Some(InodeRole::RepoOwned); } - debug_assert!(false, "inode {ino} not found in any repo slot"); - InodeRole::OrgRoot + None } /// Ensure an inode + `RepoFs` exists for the given repo name. 
@@ -388,7 +387,8 @@ impl Fs for OrgFs { #[instrument(name = "OrgFs::lookup", skip(self), fields(org = %self.name))] async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - match self.inode_role(parent) { + let role = self.inode_role(parent).ok_or(LookupError::InodeNotFound)?; + match role { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. let name_str = name.to_str().ok_or(LookupError::InodeNotFound)?; @@ -470,7 +470,8 @@ impl Fs for OrgFs { #[instrument(name = "OrgFs::readdir", skip(self), fields(org = %self.name))] async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - match self.inode_role(ino) { + let role = self.inode_role(ino).ok_or(ReadDirError::InodeNotFound)?; + match role { InodeRole::OrgRoot => { // TODO(MES-674): Cleanup "special" casing for github. if self.is_github() { From db1c0c16f54ede5177b49686e735671e44a6835d Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:29:40 -0800 Subject: [PATCH 47/57] fix(mescloud): handle evicted ICBs in ensure_owner_inode/ensure_org_inode --- src/fs/mescloud/mod.rs | 45 ++++++++++++++++++++++++++---------------- src/fs/mescloud/org.rs | 36 +++++++++++++++++++++------------ 2 files changed, 51 insertions(+), 30 deletions(-) diff --git a/src/fs/mescloud/mod.rs b/src/fs/mescloud/mod.rs index 34b170a..0e32933 100644 --- a/src/fs/mescloud/mod.rs +++ b/src/fs/mescloud/mod.rs @@ -156,12 +156,14 @@ impl MesaFS { /// Does NOT bump rc. async fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { // Check if an inode already exists. 
- if let Some((&existing_ino, _)) = self + let existing_ino = self .composite .child_inodes .iter() .find(|&(_, &idx)| idx == org_idx) - { + .map(|(&ino, _)| ino); + + if let Some(existing_ino) = existing_ino { if let Some(attr) = self.composite.icache.get_attr(existing_ino).await { let rc = self .composite @@ -175,24 +177,33 @@ impl MesaFS { ); return (existing_ino, attr); } - // Attr missing — rebuild. + if self.composite.icache.contains(existing_ino) { + // ICB exists but attr missing — rebuild and cache. + warn!( + ino = existing_ino, + org_idx, "ensure_org_inode: attr missing, rebuilding" + ); + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + existing_ino, + 0o755, + now, + now, + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), + ), + }; + self.composite.icache.cache_attr(existing_ino, attr).await; + return (existing_ino, attr); + } + // ICB was evicted — clean up stale tracking entries. warn!( ino = existing_ino, - org_idx, "ensure_org_inode: attr missing, rebuilding" + org_idx, "ensure_org_inode: ICB evicted, cleaning up stale entry" ); - let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: mescloud_icache::make_common_file_attr( - existing_ino, - 0o755, - now, - now, - self.composite.icache.fs_owner(), - self.composite.icache.block_size(), - ), - }; - self.composite.icache.cache_attr(existing_ino, attr).await; - return (existing_ino, attr); + self.composite.child_inodes.remove(&existing_ino); + self.composite.inode_to_slot.remove(&existing_ino); } // Allocate new. diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index cb82241..10476e2 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -123,26 +123,36 @@ impl OrgFs { /// TODO(MES-674): Cleanup "special" casing for github. 
async fn ensure_owner_inode(&mut self, owner: &str) -> (Inode, FileAttr) { // Check existing + let mut stale_ino = None; for (&ino, existing_owner) in &self.owner_inodes { if existing_owner == owner { if let Some(attr) = self.composite.icache.get_attr(ino).await { return (ino, attr); } - let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: mescloud_icache::make_common_file_attr( - ino, - 0o755, - now, - now, - self.composite.icache.fs_owner(), - self.composite.icache.block_size(), - ), - }; - self.composite.icache.cache_attr(ino, attr).await; - return (ino, attr); + if self.composite.icache.contains(ino) { + // ICB exists but attr missing — rebuild and cache + let now = SystemTime::now(); + let attr = FileAttr::Directory { + common: mescloud_icache::make_common_file_attr( + ino, + 0o755, + now, + now, + self.composite.icache.fs_owner(), + self.composite.icache.block_size(), + ), + }; + self.composite.icache.cache_attr(ino, attr).await; + return (ino, attr); + } + // ICB was evicted — mark for cleanup + stale_ino = Some(ino); + break; } } + if let Some(ino) = stale_ino { + self.owner_inodes.remove(&ino); + } // Allocate new let ino = self.composite.icache.allocate_inode(); From 2f7aa581ed4a6ee0310dc0ef62ca9c07b1dc70a6 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:39:12 -0800 Subject: [PATCH 48/57] fix(icache): use iter_async instead of iter_sync for for_each --- src/fs/icache/async_cache.rs | 33 ++++++++++++++++++++------------- src/fs/mescloud/icache.rs | 24 ++++++++++++++---------- 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/src/fs/icache/async_cache.rs b/src/fs/icache/async_cache.rs index 41a43ad..84003da 100644 --- a/src/fs/icache/async_cache.rs +++ b/src/fs/icache/async_cache.rs @@ -479,13 +479,16 @@ impl AsyncICache { } /// Iterate over all `Available` entries (skips `InFlight`). 
- pub fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { - self.inode_table.iter_sync(|ino, state| { - if let IcbState::Available(icb) = state { - f(ino, icb); - } - true // continue iteration - }); + /// Async-safe iteration using `iter_async` to avoid contention on single-threaded runtimes. + pub async fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { + self.inode_table + .iter_async(|ino, state| { + if let IcbState::Available(icb) = state { + f(ino, icb); + } + true // continue iteration + }) + .await; } } @@ -901,9 +904,11 @@ mod tests { .await; let mut seen = std::collections::HashSet::new(); - cache.for_each(|ino, _icb| { - seen.insert(*ino); - }); + cache + .for_each(|ino, _icb| { + seen.insert(*ino); + }) + .await; assert_eq!(seen.len(), 3, "should see all 3 entries"); assert!(seen.contains(&1), "should contain root"); assert!(seen.contains(&2), "should contain inode 2"); @@ -921,9 +926,11 @@ mod tests { .await; let mut count = 0; - cache.for_each(|_, _| { - count += 1; - }); + cache + .for_each(|_, _| { + count += 1; + }) + .await; assert_eq!(count, 1, "only root, not the InFlight entry"); } diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index c3a861f..82f43b9 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -197,11 +197,13 @@ impl> MescloudICache { /// Returns the number of evicted entries. 
pub async fn evict_zero_rc_children(&self, parent: Inode) -> usize { let mut to_evict = Vec::new(); - self.inner.for_each(|&ino, icb| { - if icb.rc == 0 && icb.parent == Some(parent) { - to_evict.push(ino); - } - }); + self.inner + .for_each(|&ino, icb| { + if icb.rc == 0 && icb.parent == Some(parent) { + to_evict.push(ino); + } + }) + .await; let count = to_evict.len(); for ino in to_evict { self.inner.forget(ino, 0).await; @@ -215,11 +217,13 @@ impl> MescloudICache { pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { // Search for existing child by parent + name let mut existing_ino = None; - self.inner.for_each(|&ino, icb| { - if icb.parent == Some(parent) && icb.path.as_os_str() == name { - existing_ino = Some(ino); - } - }); + self.inner + .for_each(|&ino, icb| { + if icb.parent == Some(parent) && icb.path.as_os_str() == name { + existing_ino = Some(ino); + } + }) + .await; if let Some(ino) = existing_ino { return ino; From 8a51f426e311cf851558153ecc83fbd128c89d80 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:40:50 -0800 Subject: [PATCH 49/57] fix(deps): use async reqwest client for OTLP export to avoid blocking in tokio --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index eab0bc2..25329d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,7 @@ inquire = "0.9.2" tracing-indicatif = "0.3.14" opentelemetry = { version = "0.29", optional = true } opentelemetry_sdk = { version = "0.29", features = ["rt-tokio"], optional = true } -opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-blocking-client"], optional = true } +opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-client"], optional = true } tracing-opentelemetry = { version = "0.30", optional = true } [features] From adfb97698670cce45c8726c59d8bd1057db86645 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 
21:41:22 -0800 Subject: [PATCH 50/57] fix(repo): remove redundant cache_attr after get_or_resolve in lookup --- src/fs/mescloud/repo.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index f7a36bd..2d3d62a 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -265,7 +265,6 @@ impl Fs for RepoFs { .get_or_resolve(ino, |icb| icb.attr) .await? .ok_or(LookupError::InodeNotFound)?; - self.icache.cache_attr(ino, attr).await; let rc = self .icache From ef50487f98d77eb85be621430571b6ca36530fb5 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 21:42:01 -0800 Subject: [PATCH 51/57] fix(composite): target single slot in delegated_forget instead of iterating all --- src/fs/mescloud/composite.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 0b1ec7d..84b39b4 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -209,7 +209,8 @@ where #[must_use] #[instrument(name = "CompositeFs::delegated_forget", skip(self))] pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { - if let Some(idx) = self.slot_for_inode(ino) + let slot_idx = self.slot_for_inode(ino); + if let Some(idx) = slot_idx && let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) { self.slots[idx].inner.forget(inner_ino, nlookups).await; @@ -217,8 +218,8 @@ where if self.icache.forget(ino, nlookups).await.is_some() { self.child_inodes.remove(&ino); self.inode_to_slot.remove(&ino); - for slot in &mut self.slots { - slot.bridge.remove_inode_by_left(ino); + if let Some(idx) = slot_idx { + self.slots[idx].bridge.remove_inode_by_left(ino); } true } else { From 89ef0ff2e29121d1726cb7d9bd3e9dd13b7fd254 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:08:08 -0800 Subject: [PATCH 52/57] fix(repo): don't cache placeholder file attrs in readdir readdir was caching 
FileAttr::RegularFile { size: 0, blocks: 0 } for every file child. Since needs_resolve() returns false for files with any attr set, subsequent lookups via get_or_resolve would return the stale size=0 instead of calling the resolver for the real file size. Only cache directory attrs in readdir. File attrs are left as None so that lookup triggers the resolver. --- src/fs/mescloud/repo.rs | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index 2d3d62a..bf73b15 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -318,9 +318,13 @@ impl Fs for RepoFs { let mut entries = Vec::with_capacity(children.len()); for (name, kind) in &children { let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; - let now = SystemTime::now(); - let attr = match kind { - DirEntryType::Directory => FileAttr::Directory { + // Only cache directory attrs in readdir. File attrs are left as + // None so that lookup triggers the resolver to fetch the real file + // size. Caching placeholder file attrs (size=0) would poison + // needs_resolve(), preventing resolution on subsequent lookups. 
+ if *kind == DirEntryType::Directory { + let now = SystemTime::now(); + let attr = FileAttr::Directory { common: mescloud_icache::make_common_file_attr( child_ino, 0o755, @@ -329,26 +333,9 @@ impl Fs for RepoFs { self.icache.fs_owner(), self.icache.block_size(), ), - }, - DirEntryType::RegularFile - | DirEntryType::Symlink - | DirEntryType::CharDevice - | DirEntryType::BlockDevice - | DirEntryType::NamedPipe - | DirEntryType::Socket => FileAttr::RegularFile { - common: mescloud_icache::make_common_file_attr( - child_ino, - 0o644, - now, - now, - self.icache.fs_owner(), - self.icache.block_size(), - ), - size: 0, - blocks: 0, - }, - }; - self.icache.cache_attr(child_ino, attr).await; + }; + self.icache.cache_attr(child_ino, attr).await; + } entries.push(DirEntry { ino: child_ino, name: name.clone().into(), From e996b78e00579b7c56de5aea566dedc4f0f48a0b Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:11:10 -0800 Subject: [PATCH 53/57] fix(composite): skip inner forget for child-root inodes delegated_forget was propagating forget(inner_root_ino, nlookups) to the inner filesystem. The inner root's rc=1 is an initialization invariant independent of outer FUSE lookups. When nlookups >= 1, the inner root was evicted, making the inner filesystem non-functional on re-access (readdir/lookup would fail with InodeNotFound). Now child-root inodes (those in child_inodes) skip inner forget propagation entirely. --- src/fs/mescloud/composite.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 84b39b4..07ce681 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -206,11 +206,18 @@ where /// Propagate forget to the inner filesystem, evict from icache, and clean /// up bridge mappings. Returns `true` if the inode was evicted. 
+ /// + /// Child-root inodes (those in `child_inodes`) do NOT propagate forget to + /// the inner filesystem: the inner root's `rc=1` is an initialization + /// invariant unrelated to outer FUSE lookup counts. Propagating would + /// evict the inner root, breaking all subsequent operations on that child. #[must_use] #[instrument(name = "CompositeFs::delegated_forget", skip(self))] pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { let slot_idx = self.slot_for_inode(ino); - if let Some(idx) = slot_idx + let is_child_root = self.child_inodes.contains_key(&ino); + if !is_child_root + && let Some(idx) = slot_idx && let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) { self.slots[idx].inner.forget(inner_ino, nlookups).await; From 8bc331671b8f91985a7a381bd477713ab11914d4 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:14:09 -0800 Subject: [PATCH 54/57] fix(icache): clean up bridge and inode_to_slot on evict_zero_rc_children evict_zero_rc_children called AsyncICache::forget directly, bypassing CompositeFs cleanup. The inode_to_slot and bridge inode_map entries for evicted inodes were never removed, causing unbounded memory growth. Now returns Vec of evicted inodes so delegated_readdir can clean up the associated CompositeFs state. 
--- src/fs/mescloud/composite.rs | 8 +++++++- src/fs/mescloud/icache.rs | 15 +++++++++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/fs/mescloud/composite.rs b/src/fs/mescloud/composite.rs index 07ce681..6dbac25 100644 --- a/src/fs/mescloud/composite.rs +++ b/src/fs/mescloud/composite.rs @@ -280,7 +280,13 @@ where .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); let inner_entries = self.slots[idx].inner.readdir(inner_ino).await?; let inner_entries: Vec = inner_entries.to_vec(); - self.icache.evict_zero_rc_children(ino).await; + let evicted = self.icache.evict_zero_rc_children(ino).await; + for evicted_ino in evicted { + if let Some(slot) = self.inode_to_slot.remove(&evicted_ino) { + self.slots[slot].bridge.remove_inode_by_left(evicted_ino); + } + self.child_inodes.remove(&evicted_ino); + } let mut outer_entries = Vec::with_capacity(inner_entries.len()); for entry in &inner_entries { let outer_child_ino = self diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 82f43b9..41893d3 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -194,8 +194,9 @@ impl> MescloudICache { } /// Evict all `Available` children of `parent` that have `rc == 0`. - /// Returns the number of evicted entries. - pub async fn evict_zero_rc_children(&self, parent: Inode) -> usize { + /// Returns the list of evicted inode numbers so callers can clean up + /// associated state (e.g., bridge mappings, slot tracking). + pub async fn evict_zero_rc_children(&self, parent: Inode) -> Vec { let mut to_evict = Vec::new(); self.inner .for_each(|&ino, icb| { @@ -204,11 +205,13 @@ impl> MescloudICache { } }) .await; - let count = to_evict.len(); + let mut evicted = Vec::new(); for ino in to_evict { - self.inner.forget(ino, 0).await; + if self.inner.forget(ino, 0).await.is_some() { + evicted.push(ino); + } } - count + evicted } /// Find an existing child by (parent, name) or allocate a new inode. 
@@ -414,7 +417,7 @@ mod tests { .await; let evicted = cache.evict_zero_rc_children(1).await; - assert_eq!(evicted, 2, "should evict 2 zero-rc children of root"); + assert_eq!(evicted.len(), 2, "should evict 2 zero-rc children of root"); assert!(!cache.contains(10), "child_a should be evicted"); assert!(!cache.contains(11), "child_b should be evicted"); From 37ec291790444d2a23d2205181415d8edbf43e34 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:17:22 -0800 Subject: [PATCH 55/57] fix(bridge): warn on missing inode mapping in attr_backward attr_backward silently returned the raw inner inode number when a bridge mapping was missing. Now logs a warning so the issue is visible in traces. --- src/fs/icache/bridge.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/fs/icache/bridge.rs b/src/fs/icache/bridge.rs index 4846549..e674a56 100644 --- a/src/fs/icache/bridge.rs +++ b/src/fs/icache/bridge.rs @@ -65,8 +65,17 @@ impl HashMapBridge { /// Rewrite the `ino` field in a [`FileAttr`] from right (inner) to left (outer) namespace. 
pub fn attr_backward(&self, attr: FileAttr) -> FileAttr { - let backward = - |ino: Inode| -> Inode { self.inode_map.get_by_right(&ino).copied().unwrap_or(ino) }; + let backward = |ino: Inode| -> Inode { + if let Some(&left) = self.inode_map.get_by_right(&ino) { + left + } else { + tracing::warn!( + inner_ino = ino, + "attr_backward: no bridge mapping, using raw inner inode" + ); + ino + } + }; rewrite_attr_ino(attr, backward) } From 42fff577d28ff554f867b8deb421e2584cbb6524 Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:20:24 -0800 Subject: [PATCH 56/57] chore: add defensive warnings and guards for potential issues - Document ensure_child_ino TOCTOU invariant (safe due to &mut self) - Add MAX_DEPTH=1024 cycle protection to build_repo_path and path_of_inode - Warn on bridge reset in register_repo_slot (leaks inner icache entries) --- src/fs/mescloud/icache.rs | 7 +++++++ src/fs/mescloud/org.rs | 6 ++++++ src/fs/mescloud/repo.rs | 28 ++++++++++++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/src/fs/mescloud/icache.rs b/src/fs/mescloud/icache.rs index 41893d3..15f1f5d 100644 --- a/src/fs/mescloud/icache.rs +++ b/src/fs/mescloud/icache.rs @@ -217,6 +217,13 @@ impl> MescloudICache { /// Find an existing child by (parent, name) or allocate a new inode. /// If new, inserts a stub ICB (parent+path set, attr=None, children=None, rc=0). /// Does NOT bump rc. Returns the inode number. + /// + /// # Safety invariant + /// + /// The `for_each` scan and `insert_icb` are **not** atomic. If two callers + /// race with the same `(parent, name)`, both may allocate distinct inodes + /// for the same logical child. This is currently safe because all callers + /// go through `&mut self` on the owning `Fs` implementation. 
pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { // Search for existing child by parent + name let mut existing_ino = None; diff --git a/src/fs/mescloud/org.rs b/src/fs/mescloud/org.rs index 10476e2..968c748 100644 --- a/src/fs/mescloud/org.rs +++ b/src/fs/mescloud/org.rs @@ -337,6 +337,12 @@ impl OrgFs { ) .await; + warn!( + ino, + idx, + "register_repo_slot: resetting bridge for orphaned slot; \ + inner filesystem will not receive forget for stale inode mappings" + ); self.composite.slots[idx].bridge = HashMapBridge::new(); self.composite.slots[idx] .bridge diff --git a/src/fs/mescloud/repo.rs b/src/fs/mescloud/repo.rs index bf73b15..0d22196 100644 --- a/src/fs/mescloud/repo.rs +++ b/src/fs/mescloud/repo.rs @@ -139,6 +139,10 @@ async fn build_repo_path( cache: &AsyncICache, root_ino: Inode, ) -> Option { + /// Maximum parent-chain depth before bailing out. Prevents infinite loops + /// if a bug creates a cycle in the parent pointers. + const MAX_DEPTH: usize = 1024; + let parent = parent?; if parent == root_ino { return name.to_str().map(String::from); @@ -146,13 +150,20 @@ async fn build_repo_path( let mut components = vec![name.to_path_buf()]; let mut current = parent; - while current != root_ino { + for _ in 0..MAX_DEPTH { + if current == root_ino { + break; + } let (path, next_parent) = cache .get_icb(current, |icb| (icb.path.clone(), icb.parent)) .await?; components.push(path); current = next_parent?; } + if current != root_ino { + tracing::warn!("build_repo_path: exceeded MAX_DEPTH={MAX_DEPTH}, possible parent cycle"); + return None; + } components.reverse(); let joined: PathBuf = components.iter().collect(); joined.to_str().map(String::from) @@ -216,13 +227,19 @@ impl RepoFs { /// Returns `None` for the root inode (the repo top-level maps to `path=None` in the /// mesa content API). async fn path_of_inode(&self, ino: Inode) -> Option { + /// Maximum parent-chain depth before bailing out. 
+ const MAX_DEPTH: usize = 1024; + if ino == Self::ROOT_INO { return None; } let mut components = Vec::new(); let mut current = ino; - while current != Self::ROOT_INO { + for _ in 0..MAX_DEPTH { + if current == Self::ROOT_INO { + break; + } let (path, parent) = self .icache .get_icb(current, |icb| (icb.path.clone(), icb.parent)) @@ -230,6 +247,13 @@ impl RepoFs { components.push(path); current = parent?; } + if current != Self::ROOT_INO { + tracing::warn!( + ino, + "path_of_inode: exceeded MAX_DEPTH={MAX_DEPTH}, possible parent cycle" + ); + return None; + } components.reverse(); let joined: PathBuf = components.iter().collect(); joined.to_str().map(String::from) From 6b0d35159013c1b2f0a24754f3d88d335d55284a Mon Sep 17 00:00:00 2001 From: Marko Vejnovic Date: Tue, 10 Feb 2026 22:32:13 -0800 Subject: [PATCH 57/57] deslop --- .../plans/2025-02-10-async-mescloud-icache.md | 915 ------------------ docs/plans/2026-02-03-update-checker.md | 217 ----- .../2026-02-04-mount-directory-interlock.md | 185 ---- docs/plans/2026-02-05-homebrew-tap-formula.md | 281 ------ docs/plans/2026-02-06-decouple-dcache-icb.md | 306 ------ .../2026-02-06-default-run-subcommand.md | 70 -- .../plans/2026-02-09-async-icache-resolver.md | 858 ---------------- docs/plans/2026-02-09-mesa-dev-migration.md | 469 --------- docs/plans/2026-02-09-shellcheck-workflow.md | 131 --- docs/plans/2026-02-10-composite-fs-dedup.md | 670 ------------- docs/plans/2026-02-10-file-table.md | 280 ------ ...2026-02-10-pr31-async-icache-bug-review.md | 526 ---------- docs/plans/2026-02-10-pr31-bug-review.md | 79 -- .../2026-02-10-readdir-icache-caching.md | 572 ----------- .../2026-02-10-resolver-as-data-provider.md | 334 ------- docs/plans/2026-02-10-split-async-contains.md | 287 ------ 16 files changed, 6180 deletions(-) delete mode 100644 docs/plans/2025-02-10-async-mescloud-icache.md delete mode 100644 docs/plans/2026-02-03-update-checker.md delete mode 100644 docs/plans/2026-02-04-mount-directory-interlock.md 
delete mode 100644 docs/plans/2026-02-05-homebrew-tap-formula.md delete mode 100644 docs/plans/2026-02-06-decouple-dcache-icb.md delete mode 100644 docs/plans/2026-02-06-default-run-subcommand.md delete mode 100644 docs/plans/2026-02-09-async-icache-resolver.md delete mode 100644 docs/plans/2026-02-09-mesa-dev-migration.md delete mode 100644 docs/plans/2026-02-09-shellcheck-workflow.md delete mode 100644 docs/plans/2026-02-10-composite-fs-dedup.md delete mode 100644 docs/plans/2026-02-10-file-table.md delete mode 100644 docs/plans/2026-02-10-pr31-async-icache-bug-review.md delete mode 100644 docs/plans/2026-02-10-pr31-bug-review.md delete mode 100644 docs/plans/2026-02-10-readdir-icache-caching.md delete mode 100644 docs/plans/2026-02-10-resolver-as-data-provider.md delete mode 100644 docs/plans/2026-02-10-split-async-contains.md diff --git a/docs/plans/2025-02-10-async-mescloud-icache.md b/docs/plans/2025-02-10-async-mescloud-icache.md deleted file mode 100644 index 559ef78..0000000 --- a/docs/plans/2025-02-10-async-mescloud-icache.md +++ /dev/null @@ -1,915 +0,0 @@ -# Async Mescloud ICache Migration - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Rewrite `src/fs/mescloud/icache.rs` to use `AsyncICache`, with resolvers that fetch from the mesa backend. - -**Architecture:** `MescloudICache` becomes generic over `R: IcbResolver`. Each filesystem layer provides its own resolver: `RepoResolver` fetches file/directory metadata from the mesa content API, `OrgResolver` validates repos via the repo API, and `MesaResolver` creates static org directory entries. The `IcbLike` trait gains a `needs_resolve()` method so the cache can distinguish "stub" entries (parent+path known, attr not yet fetched) from fully resolved entries. `get_or_resolve` handles both missing entries and stubs that need resolution. 
- -**Tech Stack:** Rust, `scc::HashMap` (already in Cargo.toml), `tokio::sync::watch`, `async_trait`, `mesa_dev::MesaClient` - ---- - -## Key Design Decisions - -### Stub + Resolve pattern - -When `lookup(parent, name)` is called, the flow is: -1. `ensure_child_ino(parent, name)` — find existing child or allocate a new inode; insert a **stub** ICB (`parent` + `path` set, `attr: None`) if the entry is new. -2. `get_or_resolve(ino, |icb| icb.attr)` — if the stub's `needs_resolve()` returns true (attr is None), the cache transitions the entry to `InFlight`, calls the resolver, and transitions to `Available` once the resolver returns a fully populated ICB with attr. -3. Concurrent callers for the same inode coalesce: they see `InFlight` and wait. - -This lets the resolver access the stub's `parent` and `path` fields to build the API path, without needing a separate context parameter. - -### Resolver receives `stub: Option` + `cache: &AsyncICache` - -The resolver signature is: -```rust -fn resolve(&self, ino: Inode, stub: Option, cache: &AsyncICache) -> Future> -``` - -- `stub`: `Some(icb)` when upgrading a stub entry, `None` when creating from scratch. -- `cache`: lets the resolver walk the parent chain to build paths (e.g., `cache.get_icb(parent, |icb| icb.path.clone())`). - -### `get_or_resolve` handles stubs - -The existing `get_or_resolve` only resolves **missing** entries. We extend it to also resolve **Available entries where `icb.needs_resolve()` is true**: - -| Current state | Action | -|---|---| -| Available + `!needs_resolve()` | Return immediately (fast path) | -| Available + `needs_resolve()` | Extract stub → InFlight → resolve → Available | -| InFlight | Wait for resolution | -| Vacant | InFlight → resolve → Available | - -### `readdir` bypasses the resolver - -`readdir` makes a single API call that returns all children. It inserts fully-populated ICBs directly via `insert_icb` (not via the resolver). 
This is a batch optimization — the resolver is for per-inode resolution. - -### `MescloudICache` is generic - -```rust -pub struct MescloudICache> { - inner: AsyncICache, - inode_factory: InodeFactory, - fs_owner: (u32, u32), - block_size: u32, -} -``` - -Each FS layer instantiates with its own resolver: -- `RepoFs` → `MescloudICache` — resolver calls mesa content API -- `OrgFs` → `MescloudICache` — resolver validates repos + creates directory attrs -- `MesaFS` → `MescloudICache` — resolver creates static directory attrs - -### `readdir_buf` replaces `children` - -The `Fs::readdir` trait returns `&[DirEntry]` borrowed from `&mut self`. The async cache's closure-based API can't return references that outlive the closure. Each FS struct gets a `readdir_buf: Vec` field. The `children` field on `InodeControlBlock` is removed. - -### `make_common_file_attr` becomes a free function - -Currently a method on `MescloudICache` (uses `self.fs_owner`, `self.block_size`). Becomes a free function so resolvers can call it too. - ---- - -## Task 1: Extend IcbLike trait with `needs_resolve` - -**Files:** -- Modify: `src/fs/icache/mod.rs` - -**Step 1: Add `needs_resolve` to IcbLike** - -```rust -pub trait IcbLike { - fn new_root(path: std::path::PathBuf) -> Self; - fn rc(&self) -> u64; - fn rc_mut(&mut self) -> &mut u64; - /// Returns true if this entry needs resolution (e.g., attr not yet fetched). 
- fn needs_resolve(&self) -> bool; -} -``` - -**Step 2: Update existing IcbLike implementations** - -In `src/fs/local.rs`, the `InodeControlBlock` for local FS: -```rust -fn needs_resolve(&self) -> bool { - false // local FS entries are always fully resolved -} -``` - -In `src/fs/mescloud/icache.rs`: -```rust -fn needs_resolve(&self) -> bool { - self.attr.is_none() -} -``` - -In `src/fs/icache/async_cache.rs` tests, the `TestIcb`: -```rust -fn needs_resolve(&self) -> bool { - false -} -``` - -**Step 3: Verify compilation** - -Run: `cargo check -p git-fs` - -**Step 4: Commit** - -```bash -git add src/fs/icache/mod.rs src/fs/local.rs src/fs/mescloud/icache.rs src/fs/icache/async_cache.rs -git commit -m "feat(icache): add needs_resolve to IcbLike trait" -``` - ---- - -## Task 2: Modify IcbResolver trait and AsyncICache - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` -- Modify: `src/fs/icache/mod.rs` - -**Step 1: Update IcbResolver trait** - -In `async_cache.rs`, change the resolver to receive stub data and cache reference: - -```rust -pub trait IcbResolver: Send + Sync { - type Icb: IcbLike + Send + Sync; - type Error: Send; - - /// Resolve an inode to a fully-populated control block. - /// - /// - `stub`: `Some(icb)` if upgrading an existing stub entry, `None` if creating - /// from scratch. The stub typically has `parent` and `path` set but `attr` missing. - /// - `cache`: reference to the cache, useful for walking parent chains to build paths. 
- fn resolve( - &self, - ino: Inode, - stub: Option, - cache: &AsyncICache, - ) -> impl Future> + Send - where - Self: Sized; -} -``` - -**Step 2: Update `get_or_resolve` to handle stubs** - -Rewrite `get_or_resolve` in `AsyncICache`: - -```rust -pub async fn get_or_resolve( - &self, - ino: Inode, - then: impl FnOnce(&R::Icb) -> T, -) -> Result { - use scc::hash_map::Entry; - - let mut then_fn = Some(then); - - // Fast path: Available and fully resolved - { - let hit = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::Available(icb) if !icb.needs_resolve() => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - Some(t(icb)) - } - _ => None, - }) - .await; - if let Some(Some(r)) = hit { - return Ok(r); - } - } - - // Slow path: missing, InFlight, or stub needing resolution - loop { - match self.inode_table.entry_async(ino).await { - Entry::Occupied(mut occ) => match occ.get_mut() { - IcbState::Available(icb) if !icb.needs_resolve() => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - return Ok(t(icb)); - } - IcbState::Available(_) => { - // Stub needing resolution — extract stub, replace with InFlight - let (tx, rx) = watch::channel(()); - let old = std::mem::replace(occ.get_mut(), IcbState::InFlight(rx)); - let stub = match old { - IcbState::Available(icb) => icb, - _ => unreachable!(), - }; - drop(occ); // release shard lock before awaiting - - match self.resolver.resolve(ino, Some(stub), self).await { - Ok(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - let result = t(&icb); - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; - drop(tx); - return Ok(result); - } - Err(e) => { - self.inode_table.remove_async(&ino).await; - drop(tx); - return Err(e); - } - } - } - IcbState::InFlight(rx) => { - let mut rx = rx.clone(); - drop(occ); - let _ = rx.changed().await; - } - }, - Entry::Vacant(vac) => { - let (tx, rx) = watch::channel(()); - vac.insert_entry(IcbState::InFlight(rx)); 
- - match self.resolver.resolve(ino, None, self).await { - Ok(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - let result = t(&icb); - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; - drop(tx); - return Ok(result); - } - Err(e) => { - self.inode_table.remove_async(&ino).await; - drop(tx); - return Err(e); - } - } - } - } - } -} -``` - -**Step 3: Add `get_icb_mut_sync` for initialization** - -Add after `for_each`: - -```rust -/// Synchronous mutable access to an `Available` entry. -/// Does **not** wait for `InFlight`. Intended for initialization. -pub fn get_icb_mut_sync(&self, ino: Inode, f: impl FnOnce(&mut R::Icb) -> T) -> Option { - self.inode_table - .update(&ino, |_, state| match state { - IcbState::Available(icb) => Some(f(icb)), - IcbState::InFlight(_) => None, - }) - .flatten() -} -``` - -**Step 4: Update existing tests** - -All test resolvers need the new signature. Update `TestResolver`: - -```rust -impl IcbResolver for TestResolver { - type Icb = TestIcb; - type Error = String; - - fn resolve( - &self, - ino: Inode, - _stub: Option, - _cache: &AsyncICache, - ) -> impl Future> + Send { - let result = self.responses.lock().expect("test mutex").remove(&ino) - .unwrap_or_else(|| Err(format!("no response for inode {ino}"))); - async move { result } - } -} -``` - -Similarly update `CountingResolver`. Also update `TestIcb` to implement `needs_resolve`: - -```rust -impl IcbLike for TestIcb { - // ... existing methods ... - fn needs_resolve(&self) -> bool { - false - } -} -``` - -**Step 5: Add test for stub resolution** - -```rust -#[tokio::test] -async fn get_or_resolve_resolves_stubs() { - let resolver = TestResolver::new(); - resolver.add(42, TestIcb { rc: 1, path: "/resolved".into() }); - let cache = test_cache_with(resolver); - - // Insert a stub that needs_resolve - // We need a TestIcb variant that returns true for needs_resolve... - // For this test, use a NeedsResolveIcb or modify TestIcb. 
- // Simplest: make TestIcb.needs_resolve configurable. -} -``` - -Note: to properly test stub resolution, `TestIcb` needs a way to signal `needs_resolve() == true`. Add an optional field: - -```rust -#[derive(Debug, Clone, PartialEq)] -struct TestIcb { - rc: u64, - path: PathBuf, - resolved: bool, // defaults to true in existing tests -} - -impl IcbLike for TestIcb { - fn needs_resolve(&self) -> bool { - !self.resolved - } - // ... -} -``` - -Then test: -```rust -#[tokio::test] -async fn get_or_resolve_resolves_stub_entry() { - let resolver = TestResolver::new(); - resolver.add(42, TestIcb { rc: 1, path: "/resolved".into(), resolved: true }); - let cache = test_cache_with(resolver); - - // Insert unresolved stub - cache.insert_icb(42, TestIcb { rc: 0, path: "/stub".into(), resolved: false }).await; - - // get_or_resolve should trigger resolution because needs_resolve() == true - let path: Result = cache.get_or_resolve(42, |icb| icb.path.clone()).await; - assert_eq!(path, Ok(PathBuf::from("/resolved"))); -} -``` - -**Step 6: Run tests** - -Run: `cargo test -p git-fs --lib fs::icache::async_cache::tests` -Expected: PASS - -**Step 7: Update icache/mod.rs exports** - -Remove `#[cfg_attr(not(test), expect(dead_code))]` and `#[expect(unused_imports)]` annotations. 
- -**Step 8: Commit** - -```bash -git add src/fs/icache/async_cache.rs src/fs/icache/mod.rs -git commit -m "feat(icache): extend IcbResolver with stub+cache params, handle stubs in get_or_resolve" -``` - ---- - -## Task 3: Make InodeFactory atomic - -**Files:** -- Modify: `src/fs/icache/inode_factory.rs` - -**Step 1: Rewrite with AtomicU64** - -```rust -use std::sync::atomic::{AtomicU64, Ordering}; -use crate::fs::r#trait::Inode; - -pub struct InodeFactory { - next_inode: AtomicU64, -} - -impl InodeFactory { - pub fn new(start: Inode) -> Self { - Self { next_inode: AtomicU64::new(start) } - } - - pub fn allocate(&self) -> Inode { - self.next_inode.fetch_add(1, Ordering::Relaxed) - } -} -``` - -**Step 2: Add tests, verify, commit** - -```bash -git add src/fs/icache/inode_factory.rs -git commit -m "feat(icache): make InodeFactory atomic" -``` - ---- - -## Task 4: Rewrite MescloudICache as generic wrapper - -**Files:** -- Modify: `src/fs/mescloud/icache.rs` - -This is the core rewrite. `MescloudICache` becomes `MescloudICache` generic over the resolver. - -**Step 1: Define the new MescloudICache** - -```rust -use std::ffi::OsStr; -use std::time::SystemTime; - -use tracing::warn; - -use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver, InodeFactory}; -use crate::fs::r#trait::{ - CommonFileAttr, DirEntryType, FileAttr, FileHandle, FilesystemStats, Inode, Permissions, -}; - -pub struct InodeControlBlock { - pub parent: Option, - pub rc: u64, - pub path: std::path::PathBuf, - pub attr: Option, -} - -impl IcbLike for InodeControlBlock { - fn new_root(path: std::path::PathBuf) -> Self { - Self { rc: 1, parent: None, path, attr: None } - } - fn rc(&self) -> u64 { self.rc } - fn rc_mut(&mut self) -> &mut u64 { &mut self.rc } - fn needs_resolve(&self) -> bool { self.attr.is_none() } -} - -/// Free function — usable by both MescloudICache and resolvers. 
-pub fn make_common_file_attr( - ino: Inode, perm: u16, atime: SystemTime, mtime: SystemTime, - fs_owner: (u32, u32), block_size: u32, -) -> CommonFileAttr { - CommonFileAttr { - ino, atime, mtime, - ctime: SystemTime::UNIX_EPOCH, - crtime: SystemTime::UNIX_EPOCH, - perm: Permissions::from_bits_truncate(perm), - nlink: 1, - uid: fs_owner.0, - gid: fs_owner.1, - blksize: block_size, - } -} - -pub fn blocks_of_size(block_size: u32, size: u64) -> u64 { - size.div_ceil(u64::from(block_size)) -} - -pub struct MescloudICache> { - inner: AsyncICache, - inode_factory: InodeFactory, - fs_owner: (u32, u32), - block_size: u32, -} -``` - -**Step 2: Implement methods** - -Key methods (all `&self`): - -```rust -impl> MescloudICache { - pub fn new(resolver: R, root_ino: Inode, fs_owner: (u32, u32), block_size: u32) -> Self { ... } - - // Delegated from AsyncICache (async): - pub async fn contains(&self, ino: Inode) -> bool { ... } - pub async fn get_icb(&self, ino: Inode, f: impl FnOnce(&InodeControlBlock) -> T) -> Option { ... } - pub async fn get_icb_mut(&self, ino: Inode, f: impl FnOnce(&mut InodeControlBlock) -> T) -> Option { ... } - pub async fn insert_icb(&self, ino: Inode, icb: InodeControlBlock) { ... } - pub async fn entry_or_insert_icb(&self, ino: Inode, factory: impl FnOnce() -> InodeControlBlock, then: impl FnOnce(&mut InodeControlBlock) -> T) -> T { ... } - pub async fn inc_rc(&self, ino: Inode) -> u64 { ... } - pub async fn forget(&self, ino: Inode, nlookups: u64) -> Option { ... } - pub async fn get_or_resolve(&self, ino: Inode, then: impl FnOnce(&InodeControlBlock) -> T) -> Result { ... } - - // Delegated (sync): - pub fn allocate_fh(&self) -> FileHandle { ... } - pub fn for_each(&self, f: impl FnMut(&Inode, &InodeControlBlock)) { ... } - pub fn inode_count(&self) -> usize { ... } - - // Domain-specific: - pub fn allocate_inode(&self) -> Inode { ... } - pub async fn get_attr(&self, ino: Inode) -> Option { ... 
} - pub async fn cache_attr(&self, ino: Inode, attr: FileAttr) { ... } - pub fn fs_owner(&self) -> (u32, u32) { ... } - pub fn block_size(&self) -> u32 { ... } - pub fn statfs(&self) -> FilesystemStats { ... } - - /// Find an existing child inode by (parent, name), or allocate a new one. - /// If the entry is new, inserts a stub ICB (parent+path set, attr=None). - pub async fn ensure_child_ino(&self, parent: Inode, name: &OsStr) -> Inode { ... } -} -``` - -Notable changes from old `MescloudICache`: -- `new()` takes a `resolver: R` parameter -- `make_common_file_attr` is now a free function (exported from module) -- `ensure_child_inode` is split: `ensure_child_ino` (finds/allocates + inserts stub) + `get_or_resolve` (resolves via resolver) -- `children` field removed from `InodeControlBlock` -- Constructor uses `get_icb_mut_sync` to set root attr - -**Step 3: Write tests for MescloudICache** - -Create tests using a `TestResolver` that creates simple directory/file ICBs: - -```rust -#[cfg(test)] -mod tests { - use super::*; - - struct TestMescloudResolver { - fs_owner: (u32, u32), - block_size: u32, - } - - impl IcbResolver for TestMescloudResolver { - type Icb = InodeControlBlock; - type Error = String; - - fn resolve( - &self, ino: Inode, stub: Option, - _cache: &AsyncICache, - ) -> impl Future> + Send { - let fs_owner = self.fs_owner; - let block_size = self.block_size; - async move { - let stub = stub.ok_or("no stub")?; - let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), - }; - Ok(InodeControlBlock { attr: Some(attr), ..stub }) - } - } - } - - // Tests: new_creates_root_with_attr, ensure_child_ino_allocates, - // get_or_resolve_populates_attr, etc. 
-} -``` - -**Step 4: Verify, commit** - -```bash -git add src/fs/mescloud/icache.rs -git commit -m "feat(mescloud): rewrite MescloudICache as generic over IcbResolver" -``` - ---- - -## Task 5: Implement RepoResolver + update RepoFs - -**Files:** -- Modify: `src/fs/mescloud/repo.rs` - -**Step 1: Define RepoResolver** - -```rust -use super::icache::{blocks_of_size, make_common_file_attr, InodeControlBlock, MescloudICache}; -use crate::fs::icache::{AsyncICache, IcbLike, IcbResolver}; - -pub(super) struct RepoResolver { - client: MesaClient, - org_name: String, - repo_name: String, - ref_: String, - fs_owner: (u32, u32), - block_size: u32, -} - -impl IcbResolver for RepoResolver { - type Icb = InodeControlBlock; - type Error = LookupError; - - fn resolve( - &self, - ino: Inode, - stub: Option, - cache: &AsyncICache, - ) -> impl Future> + Send - where - Self: Sized, - { - // Move data needed by the async block - let client = self.client.clone(); - let org_name = self.org_name.clone(); - let repo_name = self.repo_name.clone(); - let ref_ = self.ref_.clone(); - let fs_owner = self.fs_owner; - let block_size = self.block_size; - - async move { - let stub = stub.expect("RepoResolver requires a stub ICB with parent+path"); - let parent = stub.parent.expect("non-root inodes have parents"); - - // Build repo-relative path by walking parent chain - let file_path = build_path_from_cache(parent, &stub.path, cache).await; - - // Fetch from mesa content API - let content = client - .org(&org_name).repos().at(&repo_name).content() - .get(Some(ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(MesaApiError::from)?; - - let now = std::time::SystemTime::now(); - let attr = match &content { - Content::File(f) => { - let size = f.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: make_common_file_attr(ino, 0o644, now, now, fs_owner, block_size), - size, - blocks: blocks_of_size(block_size, size), - } - } - Content::Symlink(s) => { - let size = 
s.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: make_common_file_attr(ino, 0o644, now, now, fs_owner, block_size), - size, - blocks: blocks_of_size(block_size, size), - } - } - Content::Dir(_) => FileAttr::Directory { - common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), - }, - }; - - Ok(InodeControlBlock { - parent: stub.parent, - path: stub.path, - rc: stub.rc, - attr: Some(attr), - }) - } - } -} - -/// Walk the parent chain in the cache to build the repo-relative path. -async fn build_path_from_cache( - parent: Inode, - name: &std::path::Path, - cache: &AsyncICache, -) -> Option { - use std::path::PathBuf; - - let mut components = vec![name.to_path_buf()]; - let mut current = parent; - while current != RepoFs::ROOT_INO { - let (path, next_parent) = cache - .get_icb(current, |icb| (icb.path.clone(), icb.parent)) - .await?; - components.push(path); - current = next_parent?; - } - components.reverse(); - let joined: PathBuf = components.iter().collect(); - joined.to_str().map(String::from) -} -``` - -**Step 2: Update RepoFs struct** - -```rust -pub struct RepoFs { - icache: MescloudICache, - readdir_buf: Vec, - open_files: HashMap, -} -``` - -Constructor creates the resolver and passes it to `MescloudICache::new()`. The `client`, `org_name`, `repo_name`, `ref_` move into the resolver. `path_of_inode` and `path_of_child` are removed (path building is now in the resolver + `build_path_from_cache`). - -**Step 3: Update `lookup` to use get_or_resolve** - -```rust -async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - let ino = self.icache.ensure_child_ino(parent, name).await; - let attr = self.icache.get_or_resolve(ino, |icb| { - icb.attr.expect("resolver should populate attr") - }).await?; - self.icache.inc_rc(ino).await; - Ok(attr) -} -``` - -**Step 4: Update `readdir`** - -Readdir still calls the API directly (batch operation). For each child, uses `ensure_child_ino` + `insert_icb` with full attr. 
Uses `readdir_buf` for return. - -Note: readdir needs the path for the API call. Since `path_of_inode` was removed, add a helper method on `MescloudICache` or use `build_path_from_cache` directly. Actually, `path_of_inode` should stay on `RepoFs` (or become a method that uses the icache). Keep it as an async method that walks the parent chain. - -**Step 5: Update `read`** - -`read` still calls the API directly (data transfer, not metadata caching). Needs `path_of_inode` for the path. - -**Step 6: Update remaining Fs methods** - -- `getattr`: `self.icache.get_attr(ino).await.ok_or(...)` (unchanged pattern) -- `open`: `self.icache.contains(ino).await`, `self.icache.allocate_fh()` -- `forget`: `self.icache.forget(ino, nlookups).await` -- `statfs`: `self.icache.statfs()` - -**Step 7: Verify compilation + commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "feat(mescloud): implement RepoResolver, update RepoFs to use async icache" -``` - ---- - -## Task 6: Implement OrgResolver + update OrgFs - -**Files:** -- Modify: `src/fs/mescloud/org.rs` - -**Step 1: Define OrgResolver** - -```rust -pub(super) struct OrgResolver { - client: MesaClient, - org_name: String, - fs_owner: (u32, u32), - block_size: u32, -} - -impl IcbResolver for OrgResolver { - type Icb = InodeControlBlock; - type Error = LookupError; - - fn resolve( - &self, ino: Inode, stub: Option, - _cache: &AsyncICache, - ) -> impl Future> + Send { - let client = self.client.clone(); - let org_name = self.org_name.clone(); - let fs_owner = self.fs_owner; - let block_size = self.block_size; - - async move { - let stub = stub.expect("OrgResolver requires stub"); - - // Determine if this is a repo or owner dir. - // For now, all org-level inodes are directories. - // Repo validation is done by the caller before get_or_resolve. 
- let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), - }; - - Ok(InodeControlBlock { attr: Some(attr), ..stub }) - } - } -} -``` - -Note: The OrgResolver creates directory ICBs. Repo validation (the `wait_for_sync` API call) stays in `OrgFs::lookup` as a pre-check before `get_or_resolve`. This keeps the resolver simple and the validation/orchestration logic (creating `RepoFs`, bridge mappings) in `OrgFs`. - -**Step 2: Update OrgFs struct** - -```rust -pub struct OrgFs { - name: String, - client: MesaClient, - icache: MescloudICache, - readdir_buf: Vec, - repo_inodes: HashMap, - owner_inodes: HashMap, - repos: Vec, -} -``` - -**Step 3: Update helper methods (async)** - -- `repo_slot_for_inode` → async (walks parent chain via `get_icb(...).await`) -- `inode_role` → async (calls `repo_slot_for_inode`) -- `ensure_owner_inode` → async (calls icache methods with `.await`) -- `ensure_repo_inode` → async -- `translate_repo_ino_to_org` → async -- `inode_table_get_attr` → async - -**Step 4: Update Fs trait implementations** - -Same patterns as Task 5: add `.await` to icache calls, use `readdir_buf`, update `inode_role(...).await`, etc. 
- -**Step 5: Verify compilation + commit** - -```bash -git add src/fs/mescloud/org.rs -git commit -m "feat(mescloud): implement OrgResolver, update OrgFs to use async icache" -``` - ---- - -## Task 7: Implement MesaResolver + update MesaFS - -**Files:** -- Modify: `src/fs/mescloud/mod.rs` - -**Step 1: Define MesaResolver** - -```rust -pub(super) struct MesaResolver { - fs_owner: (u32, u32), - block_size: u32, -} - -impl IcbResolver for MesaResolver { - type Icb = InodeControlBlock; - type Error = std::convert::Infallible; - - fn resolve( - &self, ino: Inode, stub: Option, - _cache: &AsyncICache, - ) -> impl Future> + Send { - let fs_owner = self.fs_owner; - let block_size = self.block_size; - async move { - let stub = stub.unwrap_or_else(|| InodeControlBlock { - parent: None, path: "/".into(), rc: 0, attr: None, - }); - let now = SystemTime::now(); - let attr = FileAttr::Directory { - common: make_common_file_attr(ino, 0o755, now, now, fs_owner, block_size), - }; - Ok(InodeControlBlock { attr: Some(attr), ..stub }) - } - } -} -``` - -**Step 2: Update MesaFS struct** - -```rust -pub struct MesaFS { - icache: MescloudICache, - readdir_buf: Vec, - org_inodes: HashMap, - org_slots: Vec, -} -``` - -**Step 3: Update helper methods + Fs implementations** - -Same patterns: make helper methods async, add `.await`, use `readdir_buf`, update `inode_role(ino).await`, etc. - -**Step 4: Verify full compilation + commit** - -```bash -git add src/fs/mescloud/mod.rs -git commit -m "feat(mescloud): implement MesaResolver, update MesaFS to use async icache" -``` - ---- - -## Task 8: Update common.rs + clean up - -**Files:** -- Modify: `src/fs/mescloud/common.rs` -- Modify: `src/fs/icache/mod.rs` - -**Step 1: Update InodeControlBlock re-export** - -The `children` field was removed. Verify `common.rs` still compiles with the new ICB structure. 
- -**Step 2: Clean up icache/mod.rs exports** - -Remove dead-code annotations, ensure `AsyncICache`, `IcbResolver`, `IcbLike` are all exported cleanly. - -**Step 3: Run full test suite** - -Run: `cargo test -p git-fs` -Run: `cargo clippy -p git-fs` - -**Step 4: Final commit** - -```bash -git add -A -git commit -m "chore: clean up async icache migration" -``` - ---- - -## Summary of files changed - -| File | Change | -|---|---| -| `src/fs/icache/mod.rs` | Add `needs_resolve()` to `IcbLike`, remove dead-code annotations | -| `src/fs/icache/async_cache.rs` | Update `IcbResolver` trait (stub + cache params), extend `get_or_resolve` for stubs, add `get_icb_mut_sync` | -| `src/fs/icache/inode_factory.rs` | Make atomic (`AtomicU64`, `&self`) | -| `src/fs/mescloud/icache.rs` | Full rewrite: generic `MescloudICache`, `make_common_file_attr` free fn, `ensure_child_ino`, remove `children` from ICB | -| `src/fs/mescloud/repo.rs` | Add `RepoResolver` (fetches from mesa content API), update all methods to async icache | -| `src/fs/mescloud/org.rs` | Add `OrgResolver` (creates directory attrs), update all methods to async icache | -| `src/fs/mescloud/mod.rs` | Add `MesaResolver` (creates static directory attrs), update all methods to async icache | -| `src/fs/mescloud/common.rs` | Update ICB re-export (no `children`) | -| `src/fs/local.rs` | Add `needs_resolve()` to local `InodeControlBlock` | diff --git a/docs/plans/2026-02-03-update-checker.md b/docs/plans/2026-02-03-update-checker.md deleted file mode 100644 index 898d65b..0000000 --- a/docs/plans/2026-02-03-update-checker.md +++ /dev/null @@ -1,217 +0,0 @@ -# Update Checker Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Check at startup whether the user is running the latest released version of git-fs, and warn them if not. 
- -**Architecture:** Use the `self_update` crate to fetch the latest GitHub release from `mesa-dot-dev/git-fs`. Since releases use `canary-{short_sha}` tags (not semver), we embed the git commit SHA at build time and compare it against the latest release tag. If they differ, log an `error!` but continue execution normally. - -**Tech Stack:** `self_update` (GitHub backend), `vergen-gitcl` (build-time git SHA embedding) - ---- - -### Task 1: Add dependencies to Cargo.toml - -**Files:** -- Modify: `Cargo.toml` - -**Step 1: Add `self_update` and `vergen-gitcl` to Cargo.toml** - -Add to `[dependencies]`: -```toml -self_update = { version = "0.42", default-features = false, features = ["rustls"] } -``` - -Add a new section: -```toml -[build-dependencies] -vergen-gitcl = { version = "1", features = ["build"] } -``` - -The `self_update` crate is used to query GitHub releases. We disable default features and enable `rustls` to avoid linking OpenSSL. `vergen-gitcl` embeds the git short SHA at compile time so the binary knows what commit it was built from. - -**Step 2: Create `build.rs` to embed git SHA** - -Create: `build.rs` - -```rust -use vergen_gitcl::{BuildBuilder, Emitter, GitclBuilder}; - -fn main() -> Result<(), Box> { - let build = BuildBuilder::default().build_timestamp(false).build()?; - let gitcl = GitclBuilder::default().sha(true).build()?; - - Emitter::default() - .add_instructions(&build)? - .add_instructions(&gitcl)? - .emit()?; - - Ok(()) -} -``` - -This makes `VERGEN_GIT_SHA` available as an environment variable at compile time. 
- -**Step 3: Verify it compiles** - -Run: `cargo check` -Expected: Compiles without errors - -**Step 4: Commit** - -```bash -git add Cargo.toml Cargo.lock build.rs -git commit -m "feat: add self_update and vergen-gitcl dependencies" -``` - ---- - -### Task 2: Create `src/updates.rs` with update check logic - -**Files:** -- Create: `src/updates.rs` -- Modify: `src/main.rs` (add `mod updates;`) - -**Step 1: Create `src/updates.rs`** - -```rust -//! Checks whether the running binary is the latest released version. - -use tracing::{error, info}; - -/// The git SHA baked in at compile time by `vergen-gitcl`. -const BUILD_SHA: &str = env!("VERGEN_GIT_SHA"); - -/// Check GitHub for the latest release and warn if this binary is outdated. -/// -/// This function never fails the application — it logs errors and returns. -pub fn check_for_updates() { - let short_sha = &BUILD_SHA[..7.min(BUILD_SHA.len())]; - - let releases = match self_update::backends::github::ReleaseList::configure() - .repo_owner("mesa-dot-dev") - .repo_name("git-fs") - .build() - { - Ok(list) => match list.fetch() { - Ok(releases) => releases, - Err(e) => { - info!("Could not check for updates: {e}"); - return; - } - }, - Err(e) => { - info!("Could not configure update check: {e}"); - return; - } - }; - - let Some(latest) = releases.first() else { - info!("No releases found on GitHub."); - return; - }; - - // Release tags are "canary-{short_sha}". Extract the SHA suffix. - let latest_sha = latest - .version - .strip_prefix("canary-") - .unwrap_or(&latest.version); - - if short_sha == latest_sha { - info!("You are running the latest version ({short_sha})."); - } else { - error!( - "You are running git-fs built from commit {short_sha}, \ - but the latest release is from commit {latest_sha}. 
\ - Please update: https://github.com/mesa-dot-dev/git-fs/releases" - ); - } -} -``` - -**Step 2: Register the module in `src/main.rs`** - -Add `mod updates;` after the existing `mod fs;` line (line 10 of `src/main.rs`): - -```rust -mod app_config; -mod daemon; -mod fs; -mod updates; -``` - -**Step 3: Verify it compiles** - -Run: `cargo check` -Expected: Compiles without errors (there will be a "function never used" warning, which is fine — we call it in the next task) - -**Step 4: Commit** - -```bash -git add src/updates.rs src/main.rs -git commit -m "feat: add update checker module" -``` - ---- - -### Task 3: Call `check_for_updates()` from main - -**Files:** -- Modify: `src/main.rs` - -**Step 1: Add the update check call in `main()`** - -In `src/main.rs`, add the call right after tracing is initialized and before argument parsing (after line 51, before `let args = Args::parse();`): - -```rust -fn main() { - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .with_span_events(FmtSpan::ENTER | FmtSpan::CLOSE) - .init(); - - updates::check_for_updates(); - - let args = Args::parse(); - // ... rest of main -``` - -**Step 2: Verify it compiles** - -Run: `cargo check` -Expected: Compiles without errors or warnings - -**Step 3: Run the binary to verify update check works** - -Run: `cargo run -- --help` -Expected: You should see either the "latest version" info log or the "please update" error log (depending on whether your local commit matches the latest release), followed by the normal help output. - -**Step 4: Commit** - -```bash -git add src/main.rs -git commit -m "feat: check for updates on startup" -``` - ---- - -### Task 4: Verify clippy and formatting pass - -**Files:** (no changes expected, just verification) - -**Step 1: Run clippy** - -Run: `cargo clippy -- -D warnings` -Expected: No errors or warnings. 
If there are issues (e.g., the strict lint config may flag `expect_used` or `unwrap_used`), fix them by replacing with match/if-let as needed. - -**Step 2: Run rustfmt** - -Run: `cargo fmt --check` -Expected: No formatting issues. - -**Step 3: Commit any fixes if needed** - -```bash -git add -A -git commit -m "fix: address clippy and formatting issues" -``` diff --git a/docs/plans/2026-02-04-mount-directory-interlock.md b/docs/plans/2026-02-04-mount-directory-interlock.md deleted file mode 100644 index 4bdc091..0000000 --- a/docs/plans/2026-02-04-mount-directory-interlock.md +++ /dev/null @@ -1,185 +0,0 @@ -# Mount Directory Interlock Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Ensure the mount directory is created (including all parents) before mounting, and return an error if the directory already exists and is non-empty. - -**Architecture:** Add a `prepare_mount_point` function in `daemon.rs` that checks whether the mount directory exists and is non-empty (error), creates it with `create_dir_all` if it doesn't exist (logging via `info!`), or proceeds silently if it exists and is empty. This runs in `daemon::run` before spawning the FUSE session. The existing validation in `app_config.rs` that checks for a parent directory is no longer needed since `create_dir_all` handles the full path. - -**Tech Stack:** Rust std (`std::fs`, `tokio::fs`), `tracing` (`info!`) - ---- - -### Task 1: Add `prepare_mount_point` function to `daemon.rs` - -**Files:** -- Modify: `src/daemon.rs` - -**Step 1: Write the `prepare_mount_point` function** - -Add the following function after the `managed_fuse` module (before `wait_for_exit`), around line 141: - -```rust -/// Prepares the mount point directory. -/// -/// - If the directory exists and is non-empty, returns an error. -/// - If the directory does not exist, creates it (including parents) and logs an info message. 
-/// - If the directory exists and is empty, does nothing. -async fn prepare_mount_point(mount_point: &std::path::Path) -> Result<(), std::io::Error> { - match tokio::fs::read_dir(mount_point).await { - Ok(mut entries) => { - if entries.next_entry().await?.is_some() { - return Err(std::io::Error::new( - std::io::ErrorKind::AlreadyExists, - format!( - "Mount point '{}' already exists and is not empty.", - mount_point.display() - ), - )); - } - Ok(()) - } - Err(e) if e.kind() == std::io::ErrorKind::NotFound => { - tokio::fs::create_dir_all(mount_point).await?; - info!(path = %mount_point.display(), "Created mount point directory."); - Ok(()) - } - Err(e) => Err(e), - } -} -``` - -The logic is: -- `read_dir` succeeds → directory exists. Check if it has any entry; if so, error out. -- `read_dir` fails with `NotFound` → directory doesn't exist. Create it and log. -- `read_dir` fails with another error → propagate it (e.g., permission denied). - -**Step 2: Call `prepare_mount_point` in `daemon::run`** - -In the `run` function (`src/daemon.rs:162`), add the call **before** `ManagedFuse::new`. The function currently looks like: - -```rust -pub async fn run( - config: app_config::Config, - handle: tokio::runtime::Handle, -) -> Result<(), std::io::Error> { - // Spawn the cache if it doesn't exist. - tokio::fs::create_dir_all(&config.cache.path).await?; - - debug!(config = ?config, "Starting git-fs daemon..."); - - let fuse = managed_fuse::ManagedFuse::new(&config); -``` - -Change it to: - -```rust -pub async fn run( - config: app_config::Config, - handle: tokio::runtime::Handle, -) -> Result<(), std::io::Error> { - // Spawn the cache if it doesn't exist. 
- tokio::fs::create_dir_all(&config.cache.path).await?; - - prepare_mount_point(&config.mount_point).await?; - - debug!(config = ?config, "Starting git-fs daemon..."); - - let fuse = managed_fuse::ManagedFuse::new(&config); -``` - -**Step 3: Add `info` to the tracing imports** - -The file currently imports `use tracing::{debug, error};` at line 5. Change to: - -```rust -use tracing::{debug, error, info}; -``` - -**Step 4: Verify it compiles** - -Run: `cargo check` -Expected: No errors. - -**Step 5: Commit** - -```bash -git add src/daemon.rs -git commit -m "feat: add mount point interlock - create dir or error if non-empty" -``` - ---- - -### Task 2: Remove stale mount_point parent validation from `app_config.rs` - -**Files:** -- Modify: `src/app_config.rs` - -**Step 1: Remove the mount_point parent check** - -In `Config::validate()` (`src/app_config.rs:225`), remove the mount_point parent directory validation block (lines 235-240): - -```rust - // REMOVE THIS BLOCK: - if self.mount_point.parent().is_none() { - errors.push(format!( - "Mount point path '{}' has no parent directory.", - self.mount_point.display() - )); - } -``` - -This check is no longer needed because `prepare_mount_point` in `daemon.rs` now calls `create_dir_all` which handles the full path including all parents. The only path that has no parent is `/`, and that's not a valid mount point for other reasons (the non-empty check will catch it). - -**Step 2: Verify it compiles** - -Run: `cargo check` -Expected: No errors. 
- -**Step 3: Commit** - -```bash -git add src/app_config.rs -git commit -m "refactor: remove stale mount_point parent validation" -``` - ---- - -### Task 3: Manual smoke test - -**Step 1: Test with a non-existent mount point** - -```bash -# Pick a temp path that doesn't exist -export TEST_MNT=$(mktemp -d)/git-fs-test-mnt -rmdir "$(dirname "$TEST_MNT")" # remove so the full path is gone -cargo run -- --config-path /dev/null run # uses default mount point -# Or set GIT_FS_MOUNT_POINT=$TEST_MNT -``` - -Expected: The directory is created and an `info` log line appears saying "Created mount point directory." - -**Step 2: Test with a non-empty mount point** - -```bash -mkdir -p /tmp/git-fs-nonempty-test -touch /tmp/git-fs-nonempty-test/somefile -GIT_FS_MOUNT_POINT=/tmp/git-fs-nonempty-test cargo run -- run -``` - -Expected: Error message about mount point not being empty. Process exits with an error. - -**Step 3: Test with an existing empty mount point** - -```bash -mkdir -p /tmp/git-fs-empty-test -GIT_FS_MOUNT_POINT=/tmp/git-fs-empty-test cargo run -- run -``` - -Expected: No error about the directory, proceeds to mount normally. - -**Step 4: Clean up** - -```bash -rm -rf /tmp/git-fs-nonempty-test /tmp/git-fs-empty-test -``` diff --git a/docs/plans/2026-02-05-homebrew-tap-formula.md b/docs/plans/2026-02-05-homebrew-tap-formula.md deleted file mode 100644 index 152f935..0000000 --- a/docs/plans/2026-02-05-homebrew-tap-formula.md +++ /dev/null @@ -1,281 +0,0 @@ -# Homebrew Tap Formula for git-fs (v2 — versioned releases) - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** On each promote-to-latest, create a permanent versioned GitHub release AND update the Homebrew tap with both an updated `git-fs.rb` (latest) and a new versioned `git-fs@{version}.rb` formula. 
- -**Architecture:** The promote-to-latest workflow gets two new capabilities: (1) it creates a permanent versioned release tag (e.g., `v0.1.1-alpha.1`) in addition to the ephemeral `latest` tag, and (2) it pushes a commit to `mesa-dot-dev/homebrew-tap` that updates `Formula/git-fs.rb` and creates `Formula/git-fs@{version}.rb`. Both formulas point to the permanent versioned release URL. - -**Tech Stack:** Homebrew Ruby formula, GitHub Actions, bash, `gh` CLI - ---- - -### Task 1: Add outputs and versioned release to the promote job - -**Files:** -- Modify: `.github/workflows/promote-to-latest.yml` (the `promote` job only) - -**Step 1: Add `outputs` to the `promote` job** - -The `update-homebrew` job needs the version and tag from `promote`. Add an `outputs` block to the `promote` job so downstream jobs can access them: - -```yaml - promote: - name: Promote canary to latest - runs-on: ubuntu-latest - outputs: - version: ${{ steps.canary.outputs.version }} - base_version: ${{ steps.canary.outputs.base_version }} - tag: ${{ steps.canary.outputs.tag }} - target: ${{ steps.canary.outputs.target }} - steps: - ... -``` - -**Step 2: Extract the base version (strip `+sha`) in the "Find latest canary release" step** - -After the existing `VERSION` extraction (line ~42), add: - -```bash - BASE_VERSION=$(echo "${VERSION}" | sed 's/+.*//') - echo "base_version=${BASE_VERSION}" >> "$GITHUB_OUTPUT" -``` - -**Step 3: Add a new step to create the permanent versioned release** - -After the "Create latest release" step (line ~86), add a new step: - -```yaml - - name: Create versioned release - env: - GH_TOKEN: ${{ github.token }} - VERSION: ${{ steps.canary.outputs.version }} - BASE_VERSION: ${{ steps.canary.outputs.base_version }} - TARGET: ${{ steps.canary.outputs.target }} - run: | - TAG="v${BASE_VERSION}" - if gh release view "${TAG}" &>/dev/null; then - echo "Release ${TAG} already exists, skipping." 
- exit 0 - fi - gh release create "${TAG}" \ - --title "git-fs ${BASE_VERSION}" \ - --notes "Stable release of git-fs ${BASE_VERSION}." \ - --target "${TARGET}" \ - assets/* -``` - -**Step 4: Commit** - -```bash -git add .github/workflows/promote-to-latest.yml -git commit -m "feat: add versioned release to promote workflow" -``` - ---- - -### Task 2: Rewrite the update-homebrew job - -**Files:** -- Modify: `.github/workflows/promote-to-latest.yml` (the `update-homebrew` job only) - -Replace the entire `update-homebrew` job with the version below. Key changes: -- Reads `base_version` from promote outputs -- Downloads tarball from the versioned release tag (not `latest`) -- Updates `Formula/git-fs.rb` with new `url`, `sha256`, and `version` -- Creates a new `Formula/git-fs@{version}.rb` with the correct Homebrew class name -- Commits both files in a single push - -```yaml - update-homebrew: - name: Update Homebrew formula - needs: [promote] - runs-on: ubuntu-latest - steps: - - name: Download macOS universal tarball - env: - GH_TOKEN: ${{ github.token }} - run: | - TAG="v${{ needs.promote.outputs.base_version }}" - curl -fSL -o git-fs-macos-universal.tar.gz \ - "https://github.com/${{ github.repository }}/releases/download/${TAG}/git-fs-macos-universal.tar.gz" - - - name: Compute SHA256 - id: sha - run: | - SHA=$(sha256sum git-fs-macos-universal.tar.gz | cut -d' ' -f1) - echo "sha256=${SHA}" >> "$GITHUB_OUTPUT" - echo "SHA256: ${SHA}" - - - name: Update tap formulae - env: - TAP_TOKEN: ${{ secrets.HOMEBREW_TAP_TOKEN }} - BASE_VERSION: ${{ needs.promote.outputs.base_version }} - SHA256: ${{ steps.sha.outputs.sha256 }} - run: | - git clone "https://x-access-token:${TAP_TOKEN}@github.com/mesa-dot-dev/homebrew-tap.git" tap - cd tap - - TAG="v${BASE_VERSION}" - URL="https://github.com/mesa-dot-dev/git-fs/releases/download/${TAG}/git-fs-macos-universal.tar.gz" - - # Compute Homebrew class name for versioned formula - # git-fs@0.1.1-alpha.1 → GitFsAT011Alpha1 - 
CLASS_NAME=$(ruby -e " - name = 'git-fs@${BASE_VERSION}' - class_name = name.capitalize - class_name.gsub!(/[-_.\s]([a-zA-Z0-9])/) { \$1.upcase } - class_name.tr!('+', 'x') - class_name.sub!(/(.)@(\d)/, '\1AT\2') - puts class_name - ") - FORMULA_FILE="Formula/git-fs@${BASE_VERSION}.rb" - - # Update Formula/git-fs.rb (latest) - cat > Formula/git-fs.rb << FORMULA - class GitFs < Formula - desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" - homepage "https://github.com/mesa-dot-dev/git-fs" - version "${BASE_VERSION}" - url "${URL}" - sha256 "${SHA256}" - license "MIT" - - depends_on :macos - depends_on cask: "macfuse" - - def install - bin.install "git-fs" - end - - test do - assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) - end - end - FORMULA - - # Create versioned formula (e.g., Formula/git-fs@0.1.1-alpha.1.rb) - cat > "${FORMULA_FILE}" << FORMULA - class ${CLASS_NAME} < Formula - desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" - homepage "https://github.com/mesa-dot-dev/git-fs" - version "${BASE_VERSION}" - url "${URL}" - sha256 "${SHA256}" - license "MIT" - - depends_on :macos - depends_on cask: "macfuse" - - def install - bin.install "git-fs" - end - - test do - assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) - end - end - FORMULA - - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - git add Formula/ - git diff --cached --quiet && echo "No changes to commit" && exit 0 - git commit -m "git-fs ${BASE_VERSION}" - git push -``` - -**Important detail about heredoc indentation:** The `cat > file << FORMULA` heredocs above must produce Ruby files with **no leading indentation** (Homebrew requires the class definition at column 0, methods indented 2 spaces). 
The heredocs in the YAML `run:` block must be written so the output has correct Ruby indentation — i.e., the content lines inside the heredoc should NOT be indented relative to the YAML block. Use `<<-FORMULA` with tab-stripping or write the content flush-left. - -**Step 1: Replace the update-homebrew job in the workflow file** - -Delete lines 102-135 (the current `update-homebrew` job) and replace with the YAML above. - -**Step 2: Verify YAML indentation is correct** - -The `update-homebrew` job must be at the same indent level as `promote` (2 spaces under `jobs:`). - -**Step 3: Commit** - -```bash -git add .github/workflows/promote-to-latest.yml -git commit -m "feat: versioned Homebrew formulae on promote-to-latest" -``` - ---- - -### Task 3: Update the initial formula in the tap repo - -**Files:** -- Modify: `Formula/git-fs.rb` (in `mesa-dot-dev/homebrew-tap` repo) - -The formula currently has no `version` field and points to the `latest` download URL. Update it to match the structure that CI will maintain, so the first CI run doesn't produce a confusing diff. 
- -**Step 1: Update the formula** - -Push directly to main in the tap repo: - -```bash -cd /tmp -rm -rf homebrew-tap-update -gh repo clone mesa-dot-dev/homebrew-tap homebrew-tap-update -cd homebrew-tap-update -``` - -Overwrite `Formula/git-fs.rb` with: - -```ruby -class GitFs < Formula - desc "Mount Mesa, GitHub and GitLab repositories as local filesystems via FUSE" - homepage "https://github.com/mesa-dot-dev/git-fs" - version "0.0.0" - url "https://github.com/mesa-dot-dev/git-fs/releases/download/v0.0.0/git-fs-macos-universal.tar.gz" - sha256 "PLACEHOLDER" - license "MIT" - - depends_on :macos - depends_on cask: "macfuse" - - def install - bin.install "git-fs" - end - - test do - assert_match "git-fs", shell_output("#{bin}/git-fs --version", 2) - end -end -``` - -**Step 2: Commit and push** - -```bash -git add Formula/git-fs.rb -git commit -m "Add version field to formula template" -git push -``` - ---- - -## Verification - -After all tasks are complete: - -1. Push the workflow changes to `main` in `mesa-dot-dev/git-fs` -2. Run the `Promote to Latest` workflow manually from GitHub Actions -3. Verify a new permanent release `v0.1.1-alpha.1` exists alongside `latest` -4. Verify `mesa-dot-dev/homebrew-tap` has both: - - `Formula/git-fs.rb` — updated with real SHA256 and versioned URL - - `Formula/git-fs@0.1.1-alpha.1.rb` — new file with same content and correct class name -5. Test install: `brew tap mesa-dot-dev/homebrew-tap && brew install git-fs` -6. 
Test versioned install: `brew install mesa-dot-dev/homebrew-tap/git-fs@0.1.1-alpha.1` - -## User install flow - -```bash -# Latest version -brew tap mesa-dot-dev/homebrew-tap -brew install git-fs - -# Specific version -brew install mesa-dot-dev/homebrew-tap/git-fs@0.1.1-alpha.1 -``` diff --git a/docs/plans/2026-02-06-decouple-dcache-icb.md b/docs/plans/2026-02-06-decouple-dcache-icb.md deleted file mode 100644 index da8911f..0000000 --- a/docs/plans/2026-02-06-decouple-dcache-icb.md +++ /dev/null @@ -1,306 +0,0 @@ -# Decouple DCache and Mescloud InodeControlBlock - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Separate the generic `MescloudDCache` implementation from the mescloud-specific `InodeControlBlock` struct, so that `src/fs/dcache/` contains only generic cache machinery and `src/fs/mescloud/` owns its own ICB definition. - -**Architecture:** Currently `src/fs/dcache/mescloud.rs` conflates two concerns: (1) the `MescloudDCache` wrapper (inode allocation, attr caching, statfs) and (2) the `InodeControlBlock` data structure specific to mescloud filesystems. We will split this file in two: `src/fs/dcache/dcache.rs` gets the `MescloudDCache` (renamed to just keep the module name generic), and `src/fs/mescloud/dcache.rs` gets the `InodeControlBlock`. We also rename `MescloudDCache` to a more generic name since the wrapper is not truly mescloud-specific. - -**Tech Stack:** Rust, no new dependencies. - ---- - -## Analysis: What Lives Where - -### Currently in `src/fs/dcache/mescloud.rs` (246 lines): -1. **`InodeControlBlock`** (lines 19-47) - Mescloud-specific ICB with `parent`, `rc`, `path`, `children`, `attr` fields + `IcbLike` impl -2. **`InodeFactory`** (lines 52-67) - Monotonically increasing inode allocator (private helper) -3. **`MescloudDCache`** (lines 75-241) - Wraps `DCache` with inode allocation, attr caching, child inode management, attr construction, statfs -4. 
**`blocks_of_size`** (line 243) - Utility function used in `repo.rs` - -### Consumers of `InodeControlBlock`: -- `src/fs/mescloud/common.rs:5` - `pub(super) use crate::fs::dcache::mescloud::InodeControlBlock` -- `src/fs/mescloud/mod.rs:17` - `use common::InodeControlBlock` (constructs ICB literals) -- `src/fs/mescloud/org.rs:11` - `use super::common::InodeControlBlock` (constructs ICB literals) -- `src/fs/dcache/mescloud.rs` itself (constructs ICB in `ensure_child_inode`) - -### Key insight: -`MescloudDCache` directly constructs `InodeControlBlock` literals inside `ensure_child_inode()` (line 164). This creates a hard coupling. To decouple, `MescloudDCache` must become generic over its ICB type (using `IcbLike`), or `ensure_child_inode` must be moved/changed. Since `ensure_child_inode` needs `parent`, `attr`, and `children` fields that go beyond `IcbLike`, the cleanest approach is: - -1. Move `InodeControlBlock` to `src/fs/mescloud/dcache.rs` -2. Keep `MescloudDCache` in `src/fs/dcache/` but rename the file to reflect it's a higher-level cache wrapper -3. Add a factory method to `IcbLike` so `MescloudDCache` can construct ICBs generically, OR keep `MescloudDCache` typed to `InodeControlBlock` but import it from the new location - -**Chosen approach:** The simplest correct refactor is: -- Move `InodeControlBlock` + its `IcbLike` impl to `src/fs/mescloud/dcache.rs` -- Keep `MescloudDCache` in `src/fs/dcache/` (rename file from `mescloud.rs` to `dcache.rs`) but make it import `InodeControlBlock` from `src/fs/mescloud::dcache` -- This creates a circular dependency problem: `dcache` depends on `mescloud` and `mescloud` depends on `dcache` - -**Revised approach:** To avoid circular deps, we must make `MescloudDCache` generic. 
Extend `IcbLike` with the additional capabilities that `ensure_child_inode` and attr methods need: - -- Add `parent(&self) -> Option` and `set_parent(&mut self, parent: Option)` to `IcbLike` -- Add `path(&self) -> &Path` to `IcbLike` -- Add `attr(&self) -> Option` and `set_attr(&mut self, attr: Option)` to `IcbLike` -- Add `children(&self) -> Option<&[DirEntry]>` and `children_mut` to `IcbLike` -- Add a new constructor `fn new_child(parent: Inode, path: PathBuf) -> Self` to `IcbLike` - -This is over-engineering. Let's reconsider. - -**Final approach (simplest):** -1. Rename `src/fs/dcache/mescloud.rs` to `src/fs/dcache/dcache.rs` - keep `MescloudDCache`, `InodeFactory`, `blocks_of_size` here -2. Create `src/fs/mescloud/dcache.rs` - move `InodeControlBlock` + `IcbLike` impl here -3. `MescloudDCache` in `src/fs/dcache/dcache.rs` imports `InodeControlBlock` from `crate::fs::mescloud::dcache` -4. **Circular dependency check:** `src/fs/dcache/dcache.rs` imports from `crate::fs::mescloud::dcache` and `src/fs/mescloud/*` imports from `crate::fs::dcache`. In Rust, cross-module imports within the same crate are fine as long as there are no circular `mod` declarations. Since both `dcache` and `mescloud` are siblings under `src/fs/mod.rs`, this works. - ---- - -## Additional Readability Opportunities Found - -1. **`src/fs/mescloud/common.rs`** - The `pub(super) use crate::fs::dcache::mescloud::InodeControlBlock` re-export (line 5) should change to import from the new location (`super::dcache::InodeControlBlock`). - -2. **`blocks_of_size` function** - Currently lives in `src/fs/dcache/mescloud.rs` (line 243) but is only used by `src/fs/mescloud/repo.rs`. It's a mescloud concern, not a generic dcache concern. Move it to `src/fs/mescloud/dcache.rs` alongside `InodeControlBlock`. 
 - ---- - -## Tasks - -### Task 1: Create `src/fs/mescloud/dcache.rs` with `InodeControlBlock` - -**Files:** -- Create: `src/fs/mescloud/dcache.rs` -- Modify: `src/fs/mescloud/mod.rs` (add `pub mod dcache;` declaration) - -**Step 1: Create `src/fs/mescloud/dcache.rs`** - -```rust -//! Mescloud-specific inode control block and helpers. - -use crate::fs::dcache::IcbLike; -use crate::fs::r#trait::{DirEntry, Inode}; - -/// Inode control block for mescloud filesystem layers (MesaFS, OrgFs, RepoFs). -pub struct InodeControlBlock { - /// The root inode doesn't have a parent. - pub parent: Option<Inode>, - pub rc: u64, - pub path: std::path::PathBuf, - pub children: Option<Vec<DirEntry>>, - /// Cached file attributes from the last lookup. - pub attr: Option<FileAttr>, -} - -impl IcbLike for InodeControlBlock { - fn new_root(path: std::path::PathBuf) -> Self { - Self { - rc: 1, - parent: None, - path, - children: None, - attr: None, - } - } - - fn rc(&self) -> u64 { - self.rc - } - - fn rc_mut(&mut self) -> &mut u64 { - &mut self.rc - } -} - -/// Calculate the number of blocks needed for a given size. -pub fn blocks_of_size(block_size: u32, size: u64) -> u64 { - size.div_ceil(u64::from(block_size)) -} -``` - -**Step 2: Add module declaration in `src/fs/mescloud/mod.rs`** - -Add `pub mod dcache;` after the existing module declarations (after line 25: `pub mod repo;`). 
The new line: - -```rust -pub mod dcache; -``` - -**Step 3: Verify it compiles** - -Run: `cargo check 2>&1 | head -30` -Expected: Compiles (new module exists but isn't consumed yet; existing code still uses old paths) - -**Step 4: Commit** - -```bash -git add src/fs/mescloud/dcache.rs src/fs/mescloud/mod.rs -git commit -m "Add mescloud/dcache.rs with InodeControlBlock and blocks_of_size" -``` - ---- - -### Task 2: Rename `src/fs/dcache/mescloud.rs` to `src/fs/dcache/dcache.rs` and update imports - -**Files:** -- Rename: `src/fs/dcache/mescloud.rs` -> `src/fs/dcache/dcache.rs` -- Modify: `src/fs/dcache/dcache.rs` (remove `InodeControlBlock`, `IcbLike` impl, and `blocks_of_size`; import `InodeControlBlock` from new location) -- Modify: `src/fs/dcache/mod.rs` (change `pub mod mescloud;` to `pub mod dcache;`, update re-exports) - -**Step 1: Rename the file** - -```bash -git mv src/fs/dcache/mescloud.rs src/fs/dcache/dcache.rs -``` - -**Step 2: Update `src/fs/dcache/mod.rs`** - -Replace the full contents with: - -```rust -//! Generic directory cache and inode management primitives. - -mod dcache; -pub mod bridge; -mod table; - -pub use dcache::MescloudDCache; -pub use table::DCache; - -/// Common interface for inode control block types usable with `DCache`. -pub trait IcbLike { - /// Create an ICB with rc=1, the given path, and no children. - fn new_root(path: std::path::PathBuf) -> Self; - fn rc(&self) -> u64; - fn rc_mut(&mut self) -> &mut u64; -} -``` - -Note: `pub mod mescloud` becomes `mod dcache` (private, since consumers access `MescloudDCache` via the re-export). - -**Step 3: Update `src/fs/dcache/dcache.rs`** - -Remove the `InodeControlBlock` struct (lines 19-27), its `IcbLike` impl (lines 29-47), and the `blocks_of_size` function (lines 243-245). 
- -Replace the import `use super::{DCache, IcbLike};` with: - -```rust -use super::{DCache, IcbLike}; -use crate::fs::mescloud::dcache::InodeControlBlock; -``` - -The `use crate::fs::r#trait::...` import line should drop `Permissions` only if it was exclusively used by `InodeControlBlock`. Check: `Permissions` is still used by `make_common_file_attr` (line 193), so keep it. - -The file should now contain only: `InodeFactory`, `MescloudDCache`, and their impls. No `InodeControlBlock`, no `blocks_of_size`. - -**Step 4: Verify it compiles** - -Run: `cargo check 2>&1 | head -30` -Expected: Compiles successfully - -**Step 5: Commit** - -```bash -git add src/fs/dcache/ -git commit -m "Rename dcache/mescloud.rs to dcache/dcache.rs, import ICB from mescloud" -``` - ---- - -### Task 3: Update `src/fs/mescloud/common.rs` import path - -**Files:** -- Modify: `src/fs/mescloud/common.rs` - -**Step 1: Update the import** - -Change line 5 from: -```rust -pub(super) use crate::fs::dcache::mescloud::InodeControlBlock; -``` -to: -```rust -pub(super) use super::dcache::InodeControlBlock; -``` - -**Step 2: Verify it compiles** - -Run: `cargo check 2>&1 | head -30` -Expected: Compiles successfully - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/common.rs -git commit -m "Update InodeControlBlock import in common.rs to use mescloud::dcache" -``` - ---- - -### Task 4: Update `src/fs/mescloud/repo.rs` import path - -**Files:** -- Modify: `src/fs/mescloud/repo.rs` - -**Step 1: Update the import** - -Change line 17 from: -```rust -use crate::fs::dcache::mescloud::{self as mescloud_dcache, MescloudDCache}; -``` -to: -```rust -use crate::fs::dcache::MescloudDCache; -use super::dcache as mescloud_dcache; -``` - -This keeps the `mescloud_dcache::blocks_of_size` call on line 138 working since `blocks_of_size` now lives in `src/fs/mescloud/dcache.rs`. 
- -**Step 2: Verify it compiles** - -Run: `cargo check 2>&1 | head -30` -Expected: Compiles successfully - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "Update repo.rs imports to use mescloud::dcache for blocks_of_size" -``` - ---- - -### Task 5: Final verification and cleanup - -**Step 1: Full build check** - -Run: `cargo check 2>&1` -Expected: No errors, no warnings related to our changes - -**Step 2: Verify no remaining references to old path** - -Search for `dcache::mescloud::` across the codebase. Should find zero results (all references now go through the re-export or the new path). - -Run: `grep -r "dcache::mescloud" src/` -Expected: No output - -**Step 3: Verify file structure matches goal** - -``` -src/fs/dcache/ - mod.rs - re-exports DCache, MescloudDCache, IcbLike trait - dcache.rs - MescloudDCache, InodeFactory (imports InodeControlBlock from mescloud) - table.rs - generic DCache - bridge.rs - HashMapBridge - -src/fs/mescloud/ - mod.rs - MesaFS (top-level container) - dcache.rs - InodeControlBlock, blocks_of_size <-- NEW - common.rs - error types, re-exports InodeControlBlock - org.rs - OrgFs (single org) - repo.rs - RepoFs (single repo) -``` - -**Step 4: Commit (if any cleanup was needed)** - -```bash -git add -A -git commit -m "Final cleanup: verify decoupled dcache and mescloud ICB" -``` diff --git a/docs/plans/2026-02-06-default-run-subcommand.md b/docs/plans/2026-02-06-default-run-subcommand.md deleted file mode 100644 index 30ee9ae..0000000 --- a/docs/plans/2026-02-06-default-run-subcommand.md +++ /dev/null @@ -1,70 +0,0 @@ -# Default `run` Subcommand Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Make `run` the default subcommand so `git-fs` works without explicitly typing `run`. - -**Architecture:** Use clap's `Args::command` as `Option` and default to `Run { daemonize: false }` when no subcommand is provided. 
This is the idiomatic clap approach — no external crates or hacks needed. - -**Tech Stack:** Rust, clap 4.x (derive) - ---- - -### Task 1: Make `run` the default subcommand - -**Files:** -- Modify: `src/main.rs:30-31` (change `command` field to `Option<Command>`) -- Modify: `src/main.rs:56` (unwrap_or default to `Run`) - -**Step 1: Change the `Args` struct to make `command` optional** - -In `src/main.rs`, change the `Args` struct: - -```rust -struct Args { - #[arg( - short, - long, - value_parser, - help = "Optional path to a mesa config TOML." - )] - config_path: Option<std::path::PathBuf>, - - #[command(subcommand)] - command: Option<Command>, -} -``` - -**Step 2: Default to `Run` when no subcommand is given** - -In the `main()` function, change: - -```rust - match args.command { -``` - -to: - -```rust - match args.command.unwrap_or(Command::Run { daemonize: false }) { -``` - -**Step 3: Build and verify it compiles** - -Run: `cargo check` -Expected: Compiles with no errors. - -**Step 4: Manual smoke test** - -Run: `cargo build && ./target/debug/git-fs --help` -Expected: Help text shows `run` and `reload` as subcommands, but `run` is no longer required. - -Run: `cargo build && ./target/debug/git-fs` (without `run`) -Expected: Behaves the same as `git-fs run` (attempts to start the daemon). - -**Step 5: Commit** - -```bash -git add src/main.rs -git commit -m "MES-707: make run the default subcommand" -``` diff --git a/docs/plans/2026-02-09-async-icache-resolver.md b/docs/plans/2026-02-09-async-icache-resolver.md deleted file mode 100644 index e817625..0000000 --- a/docs/plans/2026-02-09-async-icache-resolver.md +++ /dev/null @@ -1,858 +0,0 @@ -# Async ICache Resolver Refactor Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. 
 - -**Goal:** Refactor `AsyncICache` so the cache manages the full InFlight lifecycle internally via an `IcbResolver` trait, eliminating the spin-lock race condition and removing manual `mark_inflight`/`complete` calls. - -**Architecture:** Replace `Arc<Notify>` with `tokio::sync::watch` channels to eliminate the race condition where a notification fires between cloning the handle and awaiting it. Introduce an `IcbResolver` trait that acts as a "promise" to eventually produce an ICB for a given inode. The cache itself manages the InFlight→Available transition via a new `get_or_resolve` method. The struct becomes `AsyncICache<R>` with the ICB type derived from `R::Icb`. Public `mark_inflight`/`complete` are removed. - -**Tech Stack:** Rust 2024, `scc` 3.5.6 (`HashMap`), `tokio::sync::watch`, `std::sync::atomic::AtomicU64`, RPITIT (return-position impl trait in traits) - ---- - -## Key Design Decisions - -### `IcbResolver` trait - -```rust -pub trait IcbResolver: Send + Sync { - type Icb: IcbLike + Send + Sync; - type Error: Send; - - fn resolve( - &self, - ino: Inode, - ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send; -} -``` - -Uses RPITIT (Rust 2024 edition) instead of `#[async_trait]` — no heap allocation. - -### `IcbState<I>` with `watch` - -```rust -pub enum IcbState<I> { - InFlight(watch::Receiver<()>), - Available(I), -} -``` - -The `watch::Sender<()>` is held by the task performing resolution. When the sender is dropped (whether from success or failure), all `Receiver::changed().await` calls wake up with `Err(RecvError)` — guaranteeing no missed notifications. - -### `AsyncICache<R>` - -```rust -pub struct AsyncICache<R: IcbResolver> { - resolver: R, - inode_table: ConcurrentHashMap<Inode, IcbState<R::Icb>>, - next_fh: AtomicU64, -} -``` - -Single type parameter. ICB type is `R::Icb`. 
 - -### `wait_for_available` — no loop - -```rust -async fn wait_for_available(&self, ino: Inode) -> bool { - let rx = self.inode_table.read_async(&ino, |_, s| match s { - IcbState::InFlight(rx) => Some(rx.clone()), - IcbState::Available(_) => None, - }).await; - - match rx { - None => false, // key missing - Some(None) => true, // Available - Some(Some(mut rx)) => { - // Wait for sender to signal (or drop) - let _ = rx.changed().await; - // Re-check: entry should now be Available or removed - self.inode_table.read_async(&ino, |_, s| - matches!(s, IcbState::Available(_)) - ).await.unwrap_or(false) - } - } -} -``` - -No loop. `watch::Receiver::changed()` never misses — if the sender already signaled or was dropped before `.changed().await`, it returns immediately. - -### `get_or_resolve` — cache-managed lifecycle - -```rust -pub async fn get_or_resolve<R2>( - &self, - ino: Inode, - then: impl FnOnce(&I) -> R2, -) -> Result<R2, R::Error> { ... } -``` - -1. Check if `Available` → run `then`, return -2. If `InFlight` → clone receiver, await, re-check, run `then` -3. 
If absent → insert `InFlight(rx)`, call `resolver.resolve(ino).await`, on success upsert `Available`, on error remove entry, wake all waiters either way - -### Removed public API - -- `mark_inflight` — removed (internal only via `get_or_resolve`) -- `complete` — removed (internal only via `get_or_resolve`) - -### Retained public API (unchanged signatures) - -- `new(resolver, root_ino, root_path)` — now takes resolver as first arg -- `allocate_fh(&self) -> FileHandle` -- `inode_count(&self) -> usize` -- `contains(&self, ino) -> bool` (async, awaits InFlight) -- `get_icb(&self, ino, f) -> Option<R2>` (async, awaits InFlight) -- `get_icb_mut(&self, ino, f) -> Option<R2>` (async, awaits InFlight) -- `insert_icb(&self, ino, icb)` (async) -- `entry_or_insert_icb(&self, ino, factory, then) -> R` (async) -- `inc_rc(&self, ino) -> u64` (async) -- `forget(&self, ino, nlookups) -> Option<u64>` (async) -- `for_each(&self, f)` (sync iteration) - ---- - -## Files - -- **Modify:** `src/fs/icache/async_cache.rs` (all tasks) -- **Modify:** `src/fs/icache/mod.rs` (Task 1 only — re-export `IcbResolver`) - ---- - -## Task 1: Add `IcbResolver` trait and make `AsyncICache` generic over resolver - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` -- Modify: `src/fs/icache/mod.rs` - -**Step 1: Write the failing test** - -Add a new `TestResolver` to the test module and update `TestIcb` and test helpers. Replace the first two tests (`contains_returns_true_for_root`, `new_creates_root_entry`) to use the resolver-based constructor. - -Add at the top of `mod tests`: - -```rust -use std::collections::HashMap as StdHashMap; -use std::sync::Mutex; - -struct TestResolver { - /// Pre-loaded responses keyed by inode. 
 - responses: Mutex<StdHashMap<Inode, Result<TestIcb, String>>>, -} - -impl TestResolver { - fn new() -> Self { - Self { - responses: Mutex::new(StdHashMap::new()), - } - } - - fn add(&self, ino: Inode, icb: TestIcb) { - self.responses.lock().unwrap().insert(ino, Ok(icb)); - } - - fn add_err(&self, ino: Inode, err: impl Into<String>) { - self.responses.lock().unwrap().insert(ino, Err(err.into())); - } -} - -impl IcbResolver for TestResolver { - type Icb = TestIcb; - type Error = String; - - fn resolve( - &self, - ino: Inode, - ) -> impl Future<Output = Result<Self::Icb, Self::Error>> + Send { - let result = self - .responses - .lock() - .unwrap() - .remove(&ino) - .unwrap_or_else(|| Err(format!("no response for inode {ino}"))); - async move { result } - } -} - -/// Helper: build a cache with a `TestResolver` at root inode 1. -fn test_cache() -> AsyncICache<TestResolver> { - AsyncICache::new(TestResolver::new(), 1, "/root") -} - -/// Helper: build a cache with a given resolver at root inode 1. -fn test_cache_with(resolver: TestResolver) -> AsyncICache<TestResolver> { - AsyncICache::new(resolver, 1, "/root") -} -``` - -Update the existing `new_creates_root_entry` and `contains_returns_true_for_root` tests to use `test_cache()`: - -```rust -#[tokio::test] -async fn new_creates_root_entry() { - let cache = test_cache(); - assert_eq!(cache.inode_count(), 1, "should have exactly 1 entry"); -} - -#[tokio::test] -async fn contains_returns_true_for_root() { - let cache = test_cache(); - assert!(cache.contains(1).await, "root should exist"); -} -``` - -**Step 2: Run tests to verify they fail** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: FAIL — `IcbResolver` trait and new `AsyncICache::new` signature don't exist - -**Step 3: Implement `IcbResolver` trait and update `AsyncICache`** - -At the top of `async_cache.rs`, add the `Future` import and trait definition: - -```rust -use std::future::Future; -``` - -```rust -/// Trait for resolving an inode to its control block. 
-/// -/// Implementations act as a "promise" that an ICB will eventually be produced -/// for a given inode. The cache calls `resolve` when it needs to populate a -/// missing entry. -pub trait IcbResolver: Send + Sync { - /// The inode control block type this resolver produces. - type Icb: IcbLike + Send + Sync; - /// Error type returned when resolution fails. - type Error: Send; - - /// Resolve an inode to its control block. - fn resolve( - &self, - ino: Inode, - ) -> impl Future> + Send; -} -``` - -Change the struct: - -```rust -pub struct AsyncICache { - resolver: R, - inode_table: ConcurrentHashMap>, - next_fh: AtomicU64, -} -``` - -Change the `impl` block signature: - -```rust -impl AsyncICache { -``` - -Update the constructor to accept a resolver: - -```rust -pub fn new(resolver: R, root_ino: Inode, root_path: impl Into) -> Self { - let table = ConcurrentHashMap::new(); - drop(table.insert_sync( - root_ino, - IcbState::Available(R::Icb::new_root(root_path.into())), - )); - Self { - resolver, - inode_table: table, - next_fh: AtomicU64::new(1), - } -} -``` - -Replace all `I` type references in method signatures/bodies with `R::Icb`. Specifically: - -- `IcbState` → already generic, no change needed (the enum stays `IcbState`) -- In method signatures: `icb: I` → `icb: R::Icb`, `FnOnce(&I)` → `FnOnce(&R::Icb)`, `FnOnce(&mut I)` → `FnOnce(&mut R::Icb)`, `FnOnce() -> I` → `FnOnce() -> R::Icb`, `Option` → `Option` -- Remove the `I: IcbLike + Send + Sync` bound from the impl block (it's now derived from `R::Icb`) - -Update all existing tests to use `test_cache()` instead of `AsyncICache::::new(1, "/root")`. - -For tests that used `Arc::new(AsyncICache::::new(...))`, use `Arc::new(test_cache())` instead. - -For tests that used `cache.mark_inflight(42).await` and `cache.complete(42, ...)`, keep them compiling for now (they still exist); they'll be removed in Task 4. 
- -**Step 4: Update `mod.rs`** - -Add `IcbResolver` to the re-exports: - -```rust -#[expect(unused_imports)] -pub use async_cache::AsyncICache; -#[expect(unused_imports)] -pub use async_cache::IcbResolver; -``` - -**Step 5: Run tests to verify they pass** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: PASS (19 tests) - -**Step 6: Run clippy** - -Run: `cargo clippy -p git-fs -- -D warnings` -Expected: PASS - -**Step 7: Commit** - -```bash -git add src/fs/icache/async_cache.rs src/fs/icache/mod.rs -git commit -m "refactor(icache): add IcbResolver trait, make AsyncICache generic over resolver" -``` - ---- - -## Task 2: Replace `Notify` with `watch` and rewrite `wait_for_available` - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` - -**Step 1: Write the failing test** - -Add a test that validates the race-condition-free behavior — specifically that waiting on an already-completed entry doesn't hang: - -```rust -#[tokio::test] -async fn wait_does_not_miss_signal_on_immediate_complete() { - let cache = Arc::new(test_cache()); - - // Insert InFlight, then immediately complete before anyone waits - let (tx, rx) = tokio::sync::watch::channel(()); - cache - .inode_table - .upsert_async(42, IcbState::InFlight(rx)) - .await; - - // Complete before any waiter — drop sender to signal - cache - .insert_icb( - 42, - TestIcb { - rc: 1, - path: "/fast".into(), - }, - ) - .await; - drop(tx); - - // This must NOT hang — the signal was already sent - let result = tokio::time::timeout( - std::time::Duration::from_millis(100), - cache.contains(42), - ) - .await; - assert_eq!(result, Ok(true), "should not hang on already-completed entry"); -} -``` - -**Step 2: Run test to verify it fails** - -Run: `cargo test -p git-fs icache::async_cache::tests::wait_does_not_miss_signal -- --nocapture` -Expected: FAIL — `IcbState::InFlight` expects `Arc`, not `watch::Receiver` - -**Step 3: Replace `Notify` with `watch` throughout** - -1. 
Change imports: - -Remove: -```rust -use std::sync::Arc; -use tokio::sync::Notify; -``` - -Add: -```rust -use tokio::sync::watch; -``` - -2. Update `IcbState`: - -```rust -pub enum IcbState { - /// Entry is being loaded; waiters clone the receiver and `.changed().await`. - InFlight(watch::Receiver<()>), - /// Entry is ready for use. - Available(I), -} -``` - -3. Rewrite `wait_for_available` (no loop): - -```rust -async fn wait_for_available(&self, ino: Inode) -> bool { - let rx = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::InFlight(rx) => Some(rx.clone()), - IcbState::Available(_) => None, - }) - .await; - - match rx { - None => false, // key missing - Some(None) => true, // Available - Some(Some(mut rx)) => { - // Wait for the resolver to complete (or fail/drop sender). - // changed() returns Err(RecvError) when sender is dropped, - // which is fine — it means resolution finished. - let _ = rx.changed().await; - // Re-check: entry is now Available or was removed on error. - self.inode_table - .read_async(&ino, |_, s| matches!(s, IcbState::Available(_))) - .await - .unwrap_or(false) - } - } -} -``` - -4. Update `mark_inflight` to use `watch`: - -```rust -pub async fn mark_inflight(&self, ino: Inode) -> watch::Sender<()> { - let (tx, rx) = watch::channel(()); - self.inode_table - .upsert_async(ino, IcbState::InFlight(rx)) - .await; - tx -} -``` - -5. Update `complete` to accept `watch::Sender`: - -Actually, `complete` no longer needs the sender — it just upserts Available. The old sender being dropped will notify waiters. But we need the sender to still be alive when `complete` is called to ensure proper sequencing. Simplify: `complete` upserts Available. The caller drops the sender afterward (or it's already dropped). - -```rust -pub async fn complete(&self, ino: Inode, icb: R::Icb) { - self.inode_table - .upsert_async(ino, IcbState::Available(icb)) - .await; - // Waiters wake when the sender (held by caller) is dropped. 
- // If sender was already dropped, waiters already woke from changed().await Err. -} -``` - -6. Update `entry_or_insert_icb` — replace `Arc::clone(notify)` with `rx.clone()`: - -```rust -IcbState::InFlight(rx) => { - let mut rx = rx.clone(); - drop(occ); // release shard lock before awaiting - let _ = rx.changed().await; -} -``` - -7. Update all tests that use `mark_inflight`/`complete`: - -Replace `let _notify = cache.mark_inflight(42).await;` with `let _tx = cache.mark_inflight(42).await;`. - -The `complete` calls remain the same signature (still takes `ino, icb`), but now the `_tx` being dropped after `complete` signals the waiters. In tests, ensure `_tx` is dropped *after* `complete`: - -```rust -// In tests that use mark_inflight + complete: -let tx = cache.mark_inflight(42).await; -// ... spawn task that waits ... -cache.complete(42, TestIcb { ... }).await; -drop(tx); // signal all waiters -``` - -**Step 4: Run tests to verify they pass** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: PASS (20 tests — 19 existing + 1 new) - -**Step 5: Run clippy** - -Run: `cargo clippy -p git-fs -- -D warnings` -Expected: PASS - -**Step 6: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "refactor(icache): replace Notify with watch channels, eliminate spin-lock" -``` - ---- - -## Task 3: Add `get_or_resolve` method - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` - -**Step 1: Write the failing tests** - -```rust -#[tokio::test] -async fn get_or_resolve_returns_existing() { - let cache = test_cache(); - cache - .insert_icb(42, TestIcb { rc: 1, path: "/existing".into() }) - .await; - - let path: Result = cache - .get_or_resolve(42, |icb| icb.path.clone()) - .await; - assert_eq!(path, Ok(PathBuf::from("/existing"))); -} - -#[tokio::test] -async fn get_or_resolve_resolves_missing() { - let resolver = TestResolver::new(); - resolver.add(42, TestIcb { rc: 1, path: "/resolved".into() }); - let cache = 
test_cache_with(resolver); - - let path: Result = cache - .get_or_resolve(42, |icb| icb.path.clone()) - .await; - assert_eq!(path, Ok(PathBuf::from("/resolved"))); - // Should now be cached - assert!(cache.contains(42).await); -} - -#[tokio::test] -async fn get_or_resolve_propagates_error() { - let resolver = TestResolver::new(); - resolver.add_err(42, "network error"); - let cache = test_cache_with(resolver); - - let result: Result = cache - .get_or_resolve(42, |icb| icb.path.clone()) - .await; - assert_eq!(result, Err("network error".to_owned())); - // Entry should be cleaned up on error - assert!(!cache.contains(42).await); -} - -#[tokio::test] -async fn get_or_resolve_coalesces_concurrent_requests() { - use std::sync::atomic::{AtomicUsize, Ordering}; - - let resolve_count = Arc::new(AtomicUsize::new(0)); - let resolve_count2 = Arc::clone(&resolve_count); - - // A resolver that counts calls and delays slightly - struct CountingResolver { - count: Arc, - } - impl IcbResolver for CountingResolver { - type Icb = TestIcb; - type Error = String; - fn resolve( - &self, - _ino: Inode, - ) -> impl Future> + Send { - self.count.fetch_add(1, Ordering::SeqCst); - async { - tokio::task::yield_now().await; - Ok(TestIcb { rc: 1, path: "/coalesced".into() }) - } - } - } - - let cache = Arc::new(AsyncICache::new( - CountingResolver { count: resolve_count2 }, - 1, - "/root", - )); - - // Spawn 5 concurrent get_or_resolve for the same inode - let mut handles = Vec::new(); - for _ in 0..5 { - let c = Arc::clone(&cache); - handles.push(tokio::spawn(async move { - c.get_or_resolve(42, |icb| icb.path.clone()).await - })); - } - - for h in handles { - assert_eq!( - h.await.expect("task panicked"), - Ok(PathBuf::from("/coalesced")), - ); - } - - // Resolver should only have been called ONCE (not 5 times) - assert_eq!(resolve_count.load(Ordering::SeqCst), 1, "should coalesce to 1 resolve call"); -} -``` - -**Step 2: Run tests to verify they fail** - -Run: `cargo test -p git-fs 
icache::async_cache -- --nocapture` -Expected: FAIL — `get_or_resolve` doesn't exist - -**Step 3: Implement `get_or_resolve`** - -```rust -/// Look up `ino`. If `Available`, run `then` and return `Ok(R)`. -/// If absent, call the resolver to fetch the ICB, cache it, then run `then`. -/// If another task is already resolving this inode (`InFlight`), wait for it. -/// -/// Returns `Err(R::Error)` if resolution fails. On error the `InFlight` -/// entry is removed so subsequent calls can retry. -pub async fn get_or_resolve( - &self, - ino: Inode, - then: impl FnOnce(&R::Icb) -> R2, -) -> Result { - use scc::hash_map::Entry; - - // Fast path: already Available - { - let hit = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::Available(icb) => Some(then(icb)), - IcbState::InFlight(_) => None, - }) - .await; - match hit { - Some(Some(r)) => return Ok(r), - Some(None) => { /* InFlight — fall through */ } - None => { /* absent — fall through */ } - } - } - - // Try to become the resolver, or wait on existing InFlight - let mut then_fn = Some(then); - loop { - match self.inode_table.entry_async(ino).await { - Entry::Occupied(mut occ) => match occ.get_mut() { - IcbState::Available(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - return Ok(t(icb)); - } - IcbState::InFlight(rx) => { - let mut rx = rx.clone(); - drop(occ); - let _ = rx.changed().await; - // Re-check on next loop iteration - } - }, - Entry::Vacant(vac) => { - // We win the race — install InFlight and resolve - let (tx, rx) = watch::channel(()); - vac.insert_entry(IcbState::InFlight(rx)); - - match self.resolver.resolve(ino).await { - Ok(icb) => { - let t = then_fn.take().unwrap_or_else(|| unreachable!()); - let result = self - .inode_table - .update_async(&ino, |_, state| { - *state = IcbState::Available(icb); - }) - .await; - // If update_async returned None, entry was removed - // between our insert and here (shouldn't happen, but - // handle gracefully). 
- if result.is_none() { - // Re-read to get the value we just set — but the - // entry was removed, so we need to re-insert. - // This is an edge case that shouldn't occur in - // practice. For safety, drop tx and retry. - drop(tx); - } else { - // Read the now-Available value to run `then` - drop(tx); // wake all waiters - let r = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::Available(icb) => Some(t(icb)), - IcbState::InFlight(_) => None, - }) - .await - .flatten(); - if let Some(r) = r { - return Ok(r); - } - } - // Extremely unlikely fallthrough — retry - } - Err(e) => { - // Remove the InFlight entry - self.inode_table.remove_async(&ino).await; - drop(tx); // wake all waiters — they'll see entry missing - return Err(e); - } - } - } - } - } -} -``` - -> **Note on the loop:** Unlike `wait_for_available`, this loop only iterates if: -> (a) we were waiting on InFlight and it completed — we loop back to read Available, or -> (b) an extremely unlikely race removed our entry — we retry. -> It is NOT a spin-lock: every iteration either returns or awaits a `watch::changed()`. - -**Step 4: Run tests to verify they pass** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: PASS (23 tests — 20 existing + 4 new, though the coalescing test may need the `CountingResolver` to be defined at module level if inner items with impls are not supported) - -**Step 5: Run clippy** - -Run: `cargo clippy -p git-fs -- -D warnings` -Expected: PASS - -**Step 6: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "feat(icache): add get_or_resolve with automatic InFlight lifecycle management" -``` - ---- - -## Task 4: Remove public `mark_inflight`/`complete`, update tests - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` - -**Step 1: Make `mark_inflight` and `complete` private (or remove)** - -Remove `pub` from `mark_inflight` and `complete`. 
If they are only used by tests, move them into a `#[cfg(test)]` impl block or remove entirely if tests have been updated. - -Check: are `mark_inflight` and `complete` still used by any test? - -Tests that used them: -- `contains_awaits_inflight_then_returns_true` -- `get_icb_awaits_inflight` -- `entry_or_insert_awaits_inflight` -- `for_each_skips_inflight` -- `wait_does_not_miss_signal_on_immediate_complete` - -**Step 2: Rewrite these tests to use `get_or_resolve` instead** - -Replace tests that manually managed InFlight with resolver-based tests: - -```rust -#[tokio::test] -async fn contains_awaits_inflight_then_returns_true() { - let resolver = TestResolver::new(); - resolver.add(42, TestIcb { rc: 1, path: "/test".into() }); - let cache = Arc::new(test_cache_with(resolver)); - - // Trigger resolve in background - let cache2 = Arc::clone(&cache); - let handle = tokio::spawn(async move { - cache2.get_or_resolve(42, |_| ()).await - }); - - handle.await.expect("task panicked").expect("resolve failed"); - assert!(cache.contains(42).await, "should be true after resolve"); -} - -#[tokio::test] -async fn get_icb_awaits_inflight_via_resolver() { - use std::sync::atomic::{AtomicBool, Ordering}; - - let resolver = TestResolver::new(); - resolver.add(42, TestIcb { rc: 1, path: "/loaded".into() }); - let cache = Arc::new(test_cache_with(resolver)); - - // Resolve inode 42 first - let _: Result<(), String> = cache.get_or_resolve(42, |_| ()).await; - - let path = cache.get_icb(42, |icb| icb.path.clone()).await; - assert_eq!(path, Some(PathBuf::from("/loaded"))); -} - -#[tokio::test] -async fn for_each_skips_inflight_via_resolver() { - // Use a resolver that never responds (we'll insert InFlight manually for test) - let cache = test_cache(); - // Directly insert an InFlight entry for testing iteration - let (_tx, rx) = watch::channel(()); - cache - .inode_table - .upsert_async(42, IcbState::InFlight(rx)) - .await; - - let mut count = 0; - cache.for_each(|_, _| { - count += 1; - 
}); - assert_eq!(count, 1, "only root, not the InFlight entry"); -} -``` - -**Step 3: Remove `mark_inflight` and `complete` methods entirely** - -Delete the `mark_inflight` and `complete` methods from the impl block. Also remove the `// -- InFlight management --` section comment. - -**Step 4: Run tests to verify they pass** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: PASS - -**Step 5: Run clippy and fmt** - -Run: `cargo clippy -p git-fs -- -D warnings && cargo fmt -p git-fs --check` -Expected: PASS - -**Step 6: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "refactor(icache): remove public mark_inflight/complete, use resolver-driven lifecycle" -``` - ---- - -## Task 5: Clean up `entry_or_insert_icb` InFlight handling - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` - -Now that the cache owns the resolver, `entry_or_insert_icb` should also use the resolver pattern for InFlight entries instead of its own loop. However, `entry_or_insert_icb` serves a different purpose — it's for callers that already have an ICB to insert (factory pattern). The InFlight wait inside it should use the `watch`-based wait (which it does after Task 2). - -**Step 1: Verify `entry_or_insert_icb` uses watch correctly** - -Read the current state and verify: -- InFlight branch clones `rx`, drops the entry, awaits `rx.changed()` -- No `Arc` references remain anywhere - -**Step 2: Audit for any remaining `Notify` or `Arc` imports** - -Search the file for any `Notify`, `Arc`, or `use std::sync::Arc` — remove if unused. 
- -**Step 3: Run full test suite** - -Run: `cargo test -p git-fs icache::async_cache -- --nocapture` -Expected: PASS - -**Step 4: Run clippy and fmt** - -Run: `cargo clippy -p git-fs -- -D warnings && cargo fmt -p git-fs --check` -Expected: PASS - -**Step 5: Commit (if changes were needed)** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "refactor(icache): clean up remaining Notify references" -``` - ---- - -## Verification - -After all tasks are complete: - -1. **Run all async_cache tests:** `cargo test -p git-fs icache::async_cache -- --nocapture` -2. **Run full test suite:** `cargo test -p git-fs` -3. **Check lints:** `cargo clippy -p git-fs -- -D warnings` -4. **Check formatting:** `cargo fmt -p git-fs --check` - -All commands should pass without errors or warnings. diff --git a/docs/plans/2026-02-09-mesa-dev-migration.md b/docs/plans/2026-02-09-mesa-dev-migration.md deleted file mode 100644 index 88f0188..0000000 --- a/docs/plans/2026-02-09-mesa-dev-migration.md +++ /dev/null @@ -1,469 +0,0 @@ -# mesa-dev 0.1.1 → 1.8.0 Migration Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Fix all compilation errors caused by upgrading mesa-dev from 0.1.1 to 1.8.0. - -**Architecture:** The mesa-dev crate restructured its API from flat module access (`mesa_dev::Mesa`, `mesa_dev::models::*`, `mesa_dev::error::*`) to a hierarchical client pattern (`MesaClient::builder()...build()`, `client.org().repos().at().content()`) with OpenAPI-generated model types. We need to update 4 files in `src/fs/mescloud/` to use the new imports, builder pattern, navigation API, and model types. 
- -**Tech Stack:** Rust, mesa-dev 1.8.0, mesa_dev_oapi 1.8.0, thiserror, futures - ---- - -## API Change Summary - -| Old (0.1.1) | New (1.8.0) | -|---|---| -| `mesa_dev::Mesa` | `mesa_dev::MesaClient` | -| `mesa_dev::error::MesaError` | `mesa_dev::low_level::apis::Error` (generic per endpoint) | -| `mesa_dev::models::Repo` | `mesa_dev::models::GetByOrgRepos200ResponseReposInner` (list) / `mesa_dev::models::PostByOrgRepos201Response` (get) | -| `mesa_dev::models::Content` | `mesa_dev::low_level::content::Content` | -| `mesa_dev::models::Content::File { size, content, .. }` | `Content::File(f)` where `f.size: f64`, `f.content: Option` | -| `mesa_dev::models::Content::Dir { entries, .. }` | `Content::Dir(d)` where `d.entries: Vec` | -| `mesa_dev::models::DirEntryType::{File, Dir}` | `mesa_dev::low_level::content::DirEntry::{File(_), Symlink(_), Dir(_)}` | -| `Mesa::builder(api_key).base_url(url).build()` | `MesaClient::builder().with_api_key(key).with_base_path(url).build()` | -| `client.content(org, repo).get(path, ref_)` | `client.org(org).repos().at(repo).content().get(ref_, path, depth)` | -| `client.repos(org).get(repo)` | `client.org(org).repos().at(repo).get()` | -| `client.repos(org).list_all()` | `client.org(org).repos().list(None)` | -| `Repo.default_branch: String` | `repo.default_branch: Option` | -| `Repo.name: String` | `repo.name: Option` | -| `Repo.status: Option` | Field removed (no sync status on model) | - ---- - -### Task 1: Fix error types in `common.rs` - -**Files:** -- Modify: `src/fs/mescloud/common.rs` - -The old `mesa_dev::error::MesaError` no longer exists. The new error type `mesa_dev::low_level::apis::Error` is generic (different `T` per endpoint), so we can't use `#[from]`. Change to storing a `String` and use `.map_err()` at call sites. 
- -**Step 1: Update the three error enums** - -Replace all three occurrences of: -```rust -#[error("remote mesa error: {0}")] -RemoteMesaError(#[from] mesa_dev::error::MesaError), -``` - -With: -```rust -#[error("remote mesa error: {0}")] -RemoteMesaError(String), -``` - -This affects `LookupError` (line 16), `ReadError` (line 65), and `ReadDirError` (line 91). - -**Step 2: Verify the file is self-consistent** - -No further changes needed in `common.rs` — the `From<...> for i32` impls still work since they match on the variant name, not the inner type. - ---- - -### Task 2: Fix client import and builder in `mod.rs` - -**Files:** -- Modify: `src/fs/mescloud/mod.rs` - -**Step 1: Update the import** - -Change line 5 from: -```rust -use mesa_dev::Mesa as MesaClient; -``` -To: -```rust -use mesa_dev::MesaClient; -``` - -**Step 2: Update the builder call** - -Change the client construction in `MesaFS::new()` (lines 70-72) from: -```rust -let client = MesaClient::builder(org_conf.api_key.expose_secret()) - .base_url(MESA_API_BASE_URL) - .build(); -``` -To: -```rust -let client = MesaClient::builder() - .with_api_key(org_conf.api_key.expose_secret()) - .with_base_path(MESA_API_BASE_URL) - .build(); -``` - ---- - -### Task 3: Fix `org.rs` — imports, types, and API calls - -**Files:** -- Modify: `src/fs/mescloud/org.rs` - -**Step 1: Update the import** - -Change line 7 from: -```rust -use mesa_dev::Mesa as MesaClient; -``` -To: -```rust -use mesa_dev::MesaClient; -``` - -**Step 2: Update `wait_for_sync` method** - -The old `Repo` model had a `status` field; the new one does not. Change the method signature and body (lines 282-293). 
- -From: -```rust -async fn wait_for_sync( - &self, - repo_name: &str, -) -> Result { - let mut repo = self.client.repos(&self.name).get(repo_name).await?; - while repo.status.is_some() { - trace!(repo = repo_name, "repo is syncing, waiting..."); - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - repo = self.client.repos(&self.name).get(repo_name).await?; - } - Ok(repo) -} -``` - -To: -```rust -async fn wait_for_sync( - &self, - repo_name: &str, -) -> Result { - self.client - .org(&self.name) - .repos() - .at(repo_name) - .get() - .await - .map_err(|e| e.to_string()) -} -``` - -**Step 3: Update `lookup` — OrgRoot branch** - -In lines 375-381, the `?` on `wait_for_sync` now returns a `String` error. Update to map into `LookupError`: - -From: -```rust -let repo = self.wait_for_sync(name_str).await?; - -let (ino, attr) = self.ensure_repo_inode( - name_str, - name_str, - &repo.default_branch, - Self::ROOT_INO, -); -``` - -To: -```rust -let repo = self - .wait_for_sync(name_str) - .await - .map_err(LookupError::RemoteMesaError)?; - -let default_branch = repo.default_branch.as_deref().unwrap_or("main"); -let (ino, attr) = self.ensure_repo_inode( - name_str, - name_str, - default_branch, - Self::ROOT_INO, -); -``` - -**Step 4: Update `lookup` — OwnerDir branch** - -Similarly update lines 408-411: - -From: -```rust -let repo = self.wait_for_sync(&encoded).await?; - -let (ino, attr) = - self.ensure_repo_inode(&encoded, repo_name_str, &repo.default_branch, parent); -``` - -To: -```rust -let repo = self - .wait_for_sync(&encoded) - .await - .map_err(LookupError::RemoteMesaError)?; - -let default_branch = repo.default_branch.as_deref().unwrap_or("main"); -let (ino, attr) = - self.ensure_repo_inode(&encoded, repo_name_str, default_branch, parent); -``` - -**Step 5: Update `readdir` — OrgRoot branch** - -Change the repo listing (lines 467-478) from: -```rust -let repos: Vec = self - .client - .repos(&self.name) - .list_all() - .try_collect() - .await?; - -let 
repo_infos: Vec<(String, String)> = repos - .into_iter() - .filter(|r| r.status.is_none()) // skip repos still syncing - .map(|r| (r.name, r.default_branch)) - .collect(); -``` - -To: -```rust -let repos: Vec = self - .client - .org(&self.name) - .repos() - .list(None) - .try_collect() - .await - .map_err(|e| ReadDirError::RemoteMesaError(e.to_string()))?; - -let repo_infos: Vec<(String, String)> = repos - .into_iter() - .filter_map(|r| { - let name = r.name?; - let branch = r.default_branch.unwrap_or_else(|| "main".to_owned()); - Some((name, branch)) - }) - .collect(); -``` - ---- - -### Task 4: Fix `repo.rs` — imports, content API, and pattern matching - -**Files:** -- Modify: `src/fs/mescloud/repo.rs` - -**Step 1: Update imports** - -Change line 9 from: -```rust -use mesa_dev::Mesa as MesaClient; -``` -To: -```rust -use mesa_dev::MesaClient; -use mesa_dev::low_level::content::{Content, DirEntry as MesaDirEntry}; -``` - -**Step 2: Update `lookup` — content API call** - -Change lines 121-125 from: -```rust -let content = self - .client - .content(&self.org_name, &self.repo_name) - .get(file_path.as_deref(), Some(self.ref_.as_str())) - .await?; -``` - -To: -```rust -let content = self - .client - .org(&self.org_name) - .repos() - .at(&self.repo_name) - .content() - .get(Some(self.ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(|e| LookupError::RemoteMesaError(e.to_string()))?; -``` - -Note: parameter order changed from `(path, ref)` to `(ref, path, depth)`. - -**Step 3: Update `lookup` — Content pattern matching** - -Change lines 127-144 from: -```rust -let kind = match &content { - mesa_dev::models::Content::File { .. } => DirEntryType::RegularFile, - mesa_dev::models::Content::Dir { .. } => DirEntryType::Directory, -}; - -let (ino, _) = self.icache.ensure_child_inode(parent, name, kind); - -let now = SystemTime::now(); -let attr = match content { - mesa_dev::models::Content::File { size, .. 
} => FileAttr::RegularFile { - common: self.icache.make_common_file_attr(ino, 0o644, now, now), - size, - blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), - }, - mesa_dev::models::Content::Dir { .. } => FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), - }, -}; -``` - -To: -```rust -let kind = match &content { - Content::File(_) | Content::Symlink(_) => DirEntryType::RegularFile, - Content::Dir(_) => DirEntryType::Directory, -}; - -let (ino, _) = self.icache.ensure_child_inode(parent, name, kind); - -let now = SystemTime::now(); -let attr = match &content { - Content::File(f) | Content::Symlink(f) => { - #[expect(clippy::cast_sign_loss, clippy::cast_possible_truncation)] - let size = f.size as u64; - FileAttr::RegularFile { - common: self.icache.make_common_file_attr(ino, 0o644, now, now), - size, - blocks: mescloud_icache::blocks_of_size(Self::BLOCK_SIZE, size), - } - } - Content::Dir(_) => FileAttr::Directory { - common: self.icache.make_common_file_attr(ino, 0o755, now, now), - }, -}; -``` - -**Step 4: Update `readdir` — content API call** - -Change lines 180-184 from: -```rust -let content = self - .client - .content(&self.org_name, &self.repo_name) - .get(file_path.as_deref(), Some(self.ref_.as_str())) - .await?; -``` - -To: -```rust -let content = self - .client - .org(&self.org_name) - .repos() - .at(&self.repo_name) - .content() - .get(Some(self.ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(|e| ReadDirError::RemoteMesaError(e.to_string()))?; -``` - -**Step 5: Update `readdir` — Content + DirEntry pattern matching** - -Change lines 186-200 from: -```rust -let mesa_entries = match content { - mesa_dev::models::Content::Dir { entries, .. } => entries, - mesa_dev::models::Content::File { .. 
} => return Err(ReadDirError::NotADirectory), -}; - -let collected: Vec<_> = mesa_entries - .into_iter() - .map(|e| { - let kind = match e.entry_type { - mesa_dev::models::DirEntryType::File => DirEntryType::RegularFile, - mesa_dev::models::DirEntryType::Dir => DirEntryType::Directory, - }; - (e.name, kind) - }) - .collect(); -``` - -To: -```rust -let mesa_entries = match content { - Content::Dir(d) => d.entries, - Content::File(_) | Content::Symlink(_) => return Err(ReadDirError::NotADirectory), -}; - -let collected: Vec<(String, DirEntryType)> = mesa_entries - .into_iter() - .filter_map(|e| { - let (name, kind) = match e { - MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), - MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), - MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), - }; - Some((name, kind)) - }) - .collect(); -``` - -The explicit `Vec<(String, DirEntryType)>` annotation resolves the E0282 type inference error on `OsStr::new(name)` at line 206. - -**Step 6: Update `read` — content API call and pattern matching** - -Change lines 271-280 from: -```rust -let content = self - .client - .content(&self.org_name, &self.repo_name) - .get(file_path.as_deref(), Some(self.ref_.as_str())) - .await?; - -let encoded_content = match content { - mesa_dev::models::Content::File { content, .. } => content, - mesa_dev::models::Content::Dir { .. 
} => return Err(ReadError::NotAFile), -}; -``` - -To: -```rust -let content = self - .client - .org(&self.org_name) - .repos() - .at(&self.repo_name) - .content() - .get(Some(self.ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(|e| ReadError::RemoteMesaError(e.to_string()))?; - -let encoded_content = match content { - Content::File(f) | Content::Symlink(f) => { - f.content.unwrap_or_default() - } - Content::Dir(_) => return Err(ReadError::NotAFile), -}; -``` - ---- - -### Task 5: Build and verify - -**Step 1: Run cargo build** - -Run: `cargo build` -Expected: Successful compilation with no errors. - -**Step 2: Fix any remaining warnings or errors** - -If there are clippy warnings about `wildcard_enum_match_arm` or other lints, address them. The project has strict clippy settings (`clippy::all = "deny"`, `clippy::pedantic = "warn"`). - ---- - -### Task 6: Commit - -**Step 1: Stage and commit** - -```bash -git add src/fs/mescloud/common.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs -git commit -m "Migrate mesa-dev from 0.1.1 to 1.8.0 - -Update all API call sites to use the new hierarchical client -pattern (client.org().repos().at().content()), new Content/DirEntry -enums from mesa_dev::low_level::content, and string-based error -wrapping since the error type is now generic per endpoint." -``` diff --git a/docs/plans/2026-02-09-shellcheck-workflow.md b/docs/plans/2026-02-09-shellcheck-workflow.md deleted file mode 100644 index c612a0b..0000000 --- a/docs/plans/2026-02-09-shellcheck-workflow.md +++ /dev/null @@ -1,131 +0,0 @@ -# ShellCheck GitHub Workflow Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Harden the existing ShellCheck GitHub workflow to lint all shell scripts in the repo (not just `install.sh`), fix existing warnings, and enforce strictest settings. 
- -**Architecture:** Replace the hardcoded single-file shellcheck invocation with a dynamic `find`-based approach that catches all `.sh` files. Fix existing shellcheck warnings in `tests/docker/entrypoint.sh`. Both scripts use `#!/bin/sh` (POSIX sh), so `--shell=sh` stays appropriate as a default, but we switch to auto-detection so future scripts with different shebangs work correctly. - -**Tech Stack:** GitHub Actions, ShellCheck (pre-installed on `ubuntu-latest`) - ---- - -### Task 1: Fix shellcheck warnings in `tests/docker/entrypoint.sh` - -**Files:** -- Modify: `tests/docker/entrypoint.sh:7` - -**Step 1: Run shellcheck locally to confirm current warnings** - -Run: `shellcheck --shell=sh --severity=style --enable=all --external-sources --format=gcc ./tests/docker/entrypoint.sh` -Expected output: -``` -./tests/docker/entrypoint.sh:7:9: note: Prefer double quoting even when variables don't contain special characters. [SC2248] -./tests/docker/entrypoint.sh:7:9: note: Prefer putting braces around variable references even when not strictly required. 
[SC2250] -``` - -**Step 2: Fix the warnings** - -Change line 7 in `tests/docker/entrypoint.sh` from: -```sh -while [ $elapsed -lt 60 ]; do -``` -to: -```sh -while [ "${elapsed}" -lt 60 ]; do -``` - -**Step 3: Run shellcheck again to verify clean** - -Run: `shellcheck --shell=sh --severity=style --enable=all --external-sources --format=gcc ./tests/docker/entrypoint.sh` -Expected: No output (clean) - -**Step 4: Commit** - -```bash -git add tests/docker/entrypoint.sh -git commit -m "fix: resolve shellcheck warnings in entrypoint.sh" -``` - ---- - -### Task 2: Update the ShellCheck workflow to lint all shell scripts - -**Files:** -- Modify: `.github/workflows/shellcheck.yml` - -**Step 1: Replace the workflow file with the improved version** - -Replace the entire content of `.github/workflows/shellcheck.yml` with: - -```yaml -name: ShellCheck - -on: - push: - branches: [main] - paths: ["**.sh"] - pull_request: - branches: [main] - paths: ["**.sh"] - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - shellcheck: - name: ShellCheck - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Collect shell scripts - id: collect - run: | - files=$(find . -name '*.sh' -type f | sort) - if [ -z "${files}" ]; then - echo "No .sh files found" - echo "skip=true" >> "$GITHUB_OUTPUT" - else - echo "Found shell scripts:" - echo "${files}" - echo "skip=false" >> "$GITHUB_OUTPUT" - fi - - - name: Run ShellCheck - if: steps.collect.outputs.skip != 'true' - run: | - shellcheck --version - find . 
-name '*.sh' -type f -print0 \ - | xargs -0 shellcheck \ - --severity=style \ - --enable=all \ - --external-sources \ - --check-sourced \ - --format=gcc -``` - -Key changes from the existing workflow: -- **Finds all `.sh` files dynamically** instead of hardcoding `./install.sh` -- **Removed `--shell=sh`** — lets shellcheck auto-detect from the shebang, so bash scripts (if any are added later) get proper checks too -- **Added `--check-sourced`** — also checks files that are sourced by other scripts -- **Added a skip step** — gracefully handles the (unlikely) case where the paths filter triggers but no `.sh` files exist -- **Uses `-print0` / `xargs -0`** — handles filenames with spaces safely - -**Step 2: Validate the workflow YAML syntax** - -Run: `python3 -c "import yaml; yaml.safe_load(open('.github/workflows/shellcheck.yml'))" && echo "YAML valid"` -Expected: `YAML valid` - -**Step 3: Dry-run the shellcheck command locally to verify it passes** - -Run: `find . -name '*.sh' -type f -print0 | xargs -0 shellcheck --severity=style --enable=all --external-sources --check-sourced --format=gcc` -Expected: No output (clean — since we fixed entrypoint.sh in Task 1) - -**Step 4: Commit** - -```bash -git add .github/workflows/shellcheck.yml -git commit -m "ci: shellcheck all .sh files with strictest settings" -``` diff --git a/docs/plans/2026-02-10-composite-fs-dedup.md b/docs/plans/2026-02-10-composite-fs-dedup.md deleted file mode 100644 index 76ce2ed..0000000 --- a/docs/plans/2026-02-10-composite-fs-dedup.md +++ /dev/null @@ -1,670 +0,0 @@ -# CompositeFs Deduplication Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Eliminate duplicated delegation logic between `MesaFS` (mod.rs) and `OrgFs` (org.rs) by extracting a shared `CompositeFs` struct. 
- -**Architecture:** Both `MesaFS` and `OrgFs` implement the same "compositing filesystem" pattern: they own a set of child filesystem instances, each with a `HashMapBridge` for inode/fh translation. The delegation code for `open`, `read`, `release`, `forget`, `getattr`, `statfs`, and the inner branches of `lookup`/`readdir` is nearly identical. We extract this into a generic `CompositeFs` struct that holds the shared state and implements all delegation methods, then refactor both types to embed it. - -**Tech Stack:** Rust, async_trait, tokio - ---- - -## Feasibility Analysis - -### Why not a blanket impl? - -The user's original proposal was: - -```rust -trait Subtrait { ... } -impl Fs for T { ... } -``` - -This is **technically possible** but has significant trade-offs: - -1. **Borrow checker friction:** The subtrait needs accessor methods like `composite_mut(&mut self) -> &mut CompositeFs<...>`. All delegation goes through this single `&mut self` borrow, which prevents the split-borrow patterns the current code relies on (e.g., accessing `self.slots[idx].bridge` and `self.icache` simultaneously). Workable, but requires pre-allocating inodes before closure calls and restructuring some APIs. - -2. **Verbose subtrait definition:** The subtrait needs ~6 methods (`composite()`, `composite_mut()`, `delegation_target()`, `handle_root_lookup()`, `handle_root_readdir()`, `on_forget_cleanup()`), each implemented on both types. The net LOC savings vs thin wrappers is modest. - -3. **Indirection cost:** Readers must understand both the `MescloudFs` subtrait and the blanket impl to follow any `Fs` method. With the composition approach, each `Fs` method is a clear 1-2 line delegation. - -### Recommended approach: CompositeFs (composition) - -Extract a `CompositeFs` struct that owns the shared state and implements all delegation methods. `MesaFS` and `OrgFs` embed it and write thin `impl Fs` wrappers. 
This: - -- Eliminates ~200 lines of duplicated delegation logic -- Avoids borrow checker complications (direct field access within CompositeFs) -- Keeps `impl Fs` on each type readable (1-line delegations + custom root logic) -- Is a standard Rust composition pattern - ---- - -## Task 1: Create `ChildSlot` and `CompositeFs` structs - -**Files:** -- Create: `src/fs/mescloud/composite.rs` -- Modify: `src/fs/mescloud/mod.rs` (add `mod composite;`) - -**Step 1: Write the `ChildSlot` and `CompositeFs` types** - -Create `src/fs/mescloud/composite.rs`: - -```rust -use std::collections::HashMap; -use std::ffi::OsStr; -use std::time::SystemTime; - -use bytes::Bytes; -use tracing::{trace, warn}; - -use crate::fs::icache::bridge::HashMapBridge; -use crate::fs::icache::{FileTable, IcbResolver}; -use crate::fs::r#trait::{ - DirEntry, DirEntryType, FileAttr, FileHandle, FilesystemStats, Fs, Inode, LockOwner, OpenFile, - OpenFlags, -}; - -use super::common::InodeControlBlock; -use super::common::{GetAttrError, LookupError, OpenError, ReadDirError, ReadError, ReleaseError}; -use super::icache as mescloud_icache; -use super::icache::MescloudICache; - -/// A child filesystem slot: inner filesystem + bidirectional inode/fh bridge. -pub(super) struct ChildSlot { - pub inner: Inner, - pub bridge: HashMapBridge, -} - -/// Generic compositing filesystem that delegates to child `Inner` filesystems. -/// -/// Holds the shared infrastructure (icache, file table, readdir buffer, child -/// slots) and implements all the delegation methods that `MesaFS` and `OrgFs` -/// previously duplicated. -pub(super) struct CompositeFs -where - R: IcbResolver, -{ - pub icache: MescloudICache, - pub file_table: FileTable, - pub readdir_buf: Vec, - /// Maps outer inode → index into `slots` for child-root inodes. 
- pub child_inodes: HashMap, - pub slots: Vec>, -} -``` - -**Step 2: Verify it compiles** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS (new file is just types, no usage yet) - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/composite.rs src/fs/mescloud/mod.rs -git commit -m "refactor: add CompositeFs and ChildSlot structs for shared delegation" -``` - ---- - -## Task 2: Create `InodeCachePeek` trait - -Both `OrgFs` and `RepoFs` expose `inode_table_get_attr()` for parent layers to cache attrs during readdir. Extract this into a trait so `CompositeFs` can call it generically. - -**Files:** -- Modify: `src/fs/mescloud/common.rs` (add trait definition) -- Modify: `src/fs/mescloud/org.rs` (implement trait, remove ad-hoc method) -- Modify: `src/fs/mescloud/repo.rs` (implement trait, remove ad-hoc method) - -**Step 1: Add the trait to common.rs** - -```rust -/// Allows a parent compositor to peek at cached attrs from a child filesystem. -#[async_trait::async_trait] -pub(super) trait InodeCachePeek { - async fn peek_attr(&self, ino: Inode) -> Option; -} -``` - -**Step 2: Implement on OrgFs and RepoFs** - -Replace `pub(crate) async fn inode_table_get_attr` on both types with: - -```rust -#[async_trait::async_trait] -impl InodeCachePeek for OrgFs { - async fn peek_attr(&self, ino: Inode) -> Option { - self.icache.get_attr(ino).await - } -} -``` - -(Same for RepoFs.) - -**Step 3: Update call sites in mod.rs and org.rs** - -Replace `.inode_table_get_attr(...)` calls with `.peek_attr(...)`. 
- -**Step 4: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS - -**Step 5: Commit** - -```bash -git add src/fs/mescloud/common.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs src/fs/mescloud/mod.rs -git commit -m "refactor: extract InodeCachePeek trait from inode_table_get_attr" -``` - ---- - -## Task 3: Implement delegation methods on CompositeFs - -**Files:** -- Modify: `src/fs/mescloud/composite.rs` - -Add all the shared delegation methods. These are the methods that were duplicated between `MesaFS` and `OrgFs`. - -**Step 1: Add helper methods** - -```rust -impl CompositeFs -where - R: IcbResolver, - Inner: Fs< - LookupError = LookupError, - GetAttrError = GetAttrError, - OpenError = OpenError, - ReadError = ReadError, - ReaddirError = ReadDirError, - ReleaseError = ReleaseError, - > + InodeCachePeek + Send, -{ - /// Find the child slot that owns `ino` by walking the parent chain. - pub async fn slot_for_inode(&self, ino: Inode) -> Option { - if let Some(&idx) = self.child_inodes.get(&ino) { - return Some(idx); - } - let mut current = ino; - loop { - let parent = self - .icache - .get_icb(current, |icb| icb.parent) - .await - .flatten()?; - if let Some(&idx) = self.child_inodes.get(&parent) { - return Some(idx); - } - current = parent; - } - } - - /// Allocate an outer file handle and map it through the bridge. - pub fn alloc_fh(&mut self, slot_idx: usize, inner_fh: FileHandle) -> FileHandle { - let fh = self.file_table.allocate(); - self.slots[slot_idx].bridge.insert_fh(fh, inner_fh); - fh - } - - /// Translate an inner inode to an outer inode, allocating if needed. - /// Also inserts a stub ICB into the outer icache. 
- pub async fn translate_inner_ino( - &mut self, - slot_idx: usize, - inner_ino: Inode, - parent_outer_ino: Inode, - name: &OsStr, - ) -> Inode { - let outer_ino = self.slots[slot_idx] - .bridge - .backward_or_insert_inode(inner_ino, || self.icache.allocate_inode()); - self.icache - .entry_or_insert_icb( - outer_ino, - || InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_outer_ino), - attr: None, - children: None, - }, - |_| {}, - ) - .await; - outer_ino - } -} -``` - -**Step 2: Add delegation methods** - -```rust - // -- Fs delegation methods -- - - pub async fn delegated_getattr( - &self, - ino: Inode, - ) -> Result { - self.icache.get_attr(ino).await.ok_or_else(|| { - warn!(ino, "getattr on unknown inode"); - GetAttrError::InodeNotFound - }) - } - - pub async fn delegated_open( - &mut self, - ino: Inode, - flags: OpenFlags, - ) -> Result { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "open on inode not belonging to any child"); - OpenError::InodeNotFound - })?; - let inner_ino = self.slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("open: ino should be mapped")); - let inner_open = self.slots[idx].inner.open(inner_ino, flags).await?; - let outer_fh = self.alloc_fh(idx, inner_open.handle); - trace!(ino, outer_fh, inner_fh = inner_open.handle, "open: assigned file handle"); - Ok(OpenFile { - handle: outer_fh, - options: inner_open.options, - }) - } - - pub async fn delegated_read( - &mut self, - ino: Inode, - fh: FileHandle, - offset: u64, - size: u32, - flags: OpenFlags, - lock_owner: Option, - ) -> Result { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "read on inode not belonging to any child"); - ReadError::InodeNotFound - })?; - let inner_ino = self.slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("read: ino should be mapped")); - let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "read: no fh mapping found"); - 
ReadError::FileNotOpen - })?; - self.slots[idx] - .inner - .read(inner_ino, inner_fh, offset, size, flags, lock_owner) - .await - } - - pub async fn delegated_release( - &mut self, - ino: Inode, - fh: FileHandle, - flags: OpenFlags, - flush: bool, - ) -> Result<(), ReleaseError> { - let idx = self.slot_for_inode(ino).await.ok_or_else(|| { - warn!(ino, "release on inode not belonging to any child"); - ReleaseError::FileNotOpen - })?; - let inner_ino = self.slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("release: ino should be mapped")); - let inner_fh = self.slots[idx].bridge.fh_forward(fh).ok_or_else(|| { - warn!(fh, "release: no fh mapping found"); - ReleaseError::FileNotOpen - })?; - let result = self.slots[idx] - .inner - .release(inner_ino, inner_fh, flags, flush) - .await; - self.slots[idx].bridge.remove_fh_by_left(fh); - trace!(ino, fh, "release: cleaned up fh mapping"); - result - } - - /// Returns `true` if the inode was evicted (rc dropped to zero). - pub async fn delegated_forget(&mut self, ino: Inode, nlookups: u64) -> bool { - // Propagate forget to inner if applicable. - if let Some(idx) = self.slot_for_inode(ino).await { - if let Some(&inner_ino) = self.slots[idx].bridge.inode_map_get_by_left(ino) { - self.slots[idx].inner.forget(inner_ino, nlookups).await; - } - } - if self.icache.forget(ino, nlookups).await.is_some() { - self.child_inodes.remove(&ino); - for slot in &mut self.slots { - slot.bridge.remove_inode_by_left(ino); - } - true - } else { - false - } - } - - pub fn delegated_statfs(&self) -> FilesystemStats { - self.icache.statfs() - } - - /// Delegation branch for lookup (when parent is owned by a child slot). 
- pub async fn delegated_lookup( - &mut self, - parent: Inode, - name: &OsStr, - ) -> Result { - let idx = self.slot_for_inode(parent).await.ok_or(LookupError::InodeNotFound)?; - let inner_parent = self.slots[idx] - .bridge - .forward_or_insert_inode(parent, || unreachable!("lookup: parent should be mapped")); - let inner_attr = self.slots[idx].inner.lookup(inner_parent, name).await?; - let inner_ino = inner_attr.common().ino; - let outer_ino = self.translate_inner_ino(idx, inner_ino, parent, name).await; - let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); - self.icache.cache_attr(outer_ino, outer_attr).await; - let rc = self.icache.inc_rc(outer_ino).await; - trace!(outer_ino, inner_ino, rc, "lookup: resolved via delegation"); - Ok(outer_attr) - } - - /// Delegation branch for readdir (when ino is owned by a child slot). - pub async fn delegated_readdir( - &mut self, - ino: Inode, - ) -> Result<&[DirEntry], ReadDirError> { - let idx = self.slot_for_inode(ino).await.ok_or(ReadDirError::InodeNotFound)?; - let inner_ino = self.slots[idx] - .bridge - .forward_or_insert_inode(ino, || unreachable!("readdir: ino should be mapped")); - let inner_entries = self.slots[idx].inner.readdir(inner_ino).await?; - let inner_entries: Vec = inner_entries.to_vec(); - - let mut outer_entries = Vec::with_capacity(inner_entries.len()); - for entry in &inner_entries { - let outer_child_ino = self - .translate_inner_ino(idx, entry.ino, ino, &entry.name) - .await; - if let Some(inner_attr) = self.slots[idx].inner.peek_attr(entry.ino).await { - let outer_attr = self.slots[idx].bridge.attr_backward(inner_attr); - self.icache.cache_attr(outer_child_ino, outer_attr).await; - } - outer_entries.push(DirEntry { - ino: outer_child_ino, - name: entry.name.clone(), - kind: entry.kind, - }); - } - self.readdir_buf = outer_entries; - Ok(&self.readdir_buf) - } -``` - -**Step 3: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test 
--quiet` -Expected: PASS (methods exist but aren't called yet) - -**Step 4: Commit** - -```bash -git add src/fs/mescloud/composite.rs -git commit -m "refactor: implement delegation methods on CompositeFs" -``` - ---- - -## Task 4: Refactor MesaFS to use CompositeFs - -**Files:** -- Modify: `src/fs/mescloud/mod.rs` - -**Step 1: Replace MesaFS fields with CompositeFs** - -Replace: -```rust -pub struct MesaFS { - icache: MescloudICache, - file_table: FileTable, - readdir_buf: Vec, - org_inodes: HashMap, - org_slots: Vec, -} -``` - -With: -```rust -pub struct MesaFS { - composite: CompositeFs, -} -``` - -Remove the `OrgSlot` struct (replaced by `ChildSlot`). - -**Step 2: Update `MesaFS::new`** - -Replace field initialization with `CompositeFs` construction: -```rust -pub fn new(orgs: impl Iterator, fs_owner: (u32, u32)) -> Self { - let resolver = MesaResolver { fs_owner, block_size: Self::BLOCK_SIZE }; - Self { - composite: CompositeFs { - icache: MescloudICache::new(resolver, Self::ROOT_NODE_INO, fs_owner, Self::BLOCK_SIZE), - file_table: FileTable::new(), - readdir_buf: Vec::new(), - child_inodes: HashMap::new(), - slots: orgs.map(|org_conf| { - let client = MesaClient::builder() - .with_api_key(org_conf.api_key.expose_secret()) - .with_base_path(MESA_API_BASE_URL) - .build(); - let org = OrgFs::new(org_conf.name, client, fs_owner); - ChildSlot { inner: org, bridge: HashMapBridge::new() } - }).collect(), - }, - } -} -``` - -**Step 3: Update helper methods** - -- `inode_role`: access `self.composite.child_inodes` instead of `self.org_inodes` -- `org_slot_for_inode`: replace with `self.composite.slot_for_inode(ino)` -- `ensure_org_inode`: access `self.composite.icache`, `self.composite.slots[idx]`, `self.composite.child_inodes` -- `alloc_fh`: remove (use `self.composite.alloc_fh()`) -- `translate_org_ino_to_mesa`: remove (use `self.composite.translate_inner_ino()`) - -**Step 4: Update `impl Fs for MesaFS`** - -Replace delegation methods with one-line forwards: -```rust 
-#[async_trait::async_trait] -impl Fs for MesaFS { - type LookupError = LookupError; - type GetAttrError = GetAttrError; - type OpenError = OpenError; - type ReadError = ReadError; - type ReaddirError = ReadDirError; - type ReleaseError = ReleaseError; - - async fn lookup(&mut self, parent: Inode, name: &OsStr) -> Result { - if parent == Self::ROOT_NODE_INO { - // Root children are orgs — custom logic stays here. - let org_name = name.to_str().ok_or(LookupError::InodeNotFound)?; - let org_idx = self.composite.slots.iter() - .position(|s| s.inner.name() == org_name) - .ok_or(LookupError::InodeNotFound)?; - let (ino, attr) = self.ensure_org_inode(org_idx).await; - self.composite.icache.inc_rc(ino).await; - Ok(attr) - } else { - self.composite.delegated_lookup(parent, name).await - } - } - - async fn getattr(&mut self, ino: Inode, _fh: Option) -> Result { - self.composite.delegated_getattr(ino).await - } - - async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - if ino == Self::ROOT_NODE_INO { - // Root readdir lists orgs — custom logic stays here. - // ... 
(keep existing root readdir logic, using self.composite.*) - } else { - self.composite.delegated_readdir(ino).await - } - } - - async fn open(&mut self, ino: Inode, flags: OpenFlags) -> Result { - self.composite.delegated_open(ino, flags).await - } - - async fn read(&mut self, ino: Inode, fh: FileHandle, offset: u64, size: u32, flags: OpenFlags, lock_owner: Option) -> Result { - self.composite.delegated_read(ino, fh, offset, size, flags, lock_owner).await - } - - async fn release(&mut self, ino: Inode, fh: FileHandle, flags: OpenFlags, flush: bool) -> Result<(), ReleaseError> { - self.composite.delegated_release(ino, fh, flags, flush).await - } - - async fn forget(&mut self, ino: Inode, nlookups: u64) { - self.composite.delegated_forget(ino, nlookups).await; - } - - async fn statfs(&mut self) -> Result { - Ok(self.composite.delegated_statfs()) - } -} -``` - -**Step 5: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS (all 36 tests) - -**Step 6: Commit** - -```bash -git add src/fs/mescloud/mod.rs -git commit -m "refactor: MesaFS now delegates to CompositeFs" -``` - ---- - -## Task 5: Refactor OrgFs to use CompositeFs - -**Files:** -- Modify: `src/fs/mescloud/org.rs` - -**Step 1: Replace OrgFs fields with CompositeFs** - -Replace: -```rust -pub struct OrgFs { - name: String, - client: MesaClient, - icache: MescloudICache, - file_table: FileTable, - readdir_buf: Vec, - repo_inodes: HashMap, - owner_inodes: HashMap, - repos: Vec, -} -``` - -With: -```rust -pub struct OrgFs { - name: String, - client: MesaClient, - composite: CompositeFs, - /// Maps org-level owner-dir inodes → owner name (github only). - owner_inodes: HashMap, -} -``` - -Remove the `RepoSlot` struct (replaced by `ChildSlot`). 
- -**Step 2: Update `OrgFs::new`, helper methods, and `impl Fs`** - -Same pattern as Task 4: -- `new`: build `CompositeFs` instead of individual fields -- `inode_role`: check `self.owner_inodes` and `self.composite.child_inodes` -- `repo_slot_for_inode`: replace with `self.composite.slot_for_inode(ino)` -- `ensure_repo_inode`: use `self.composite.icache.*` and `self.composite.slots` -- `alloc_fh`: remove (use `self.composite.alloc_fh()`) -- `translate_repo_ino_to_org`: remove (use `self.composite.translate_inner_ino()`) -- Delegation Fs methods: one-line forwards to `self.composite.*` -- Root/OwnerDir branches: keep custom logic, using `self.composite.*` for icache access - -**Step 3: Update `impl InodeCachePeek for OrgFs`** - -```rust -#[async_trait::async_trait] -impl InodeCachePeek for OrgFs { - async fn peek_attr(&self, ino: Inode) -> Option { - self.composite.icache.get_attr(ino).await - } -} -``` - -**Step 4: Handle `forget` cleanup for `owner_inodes`** - -```rust -async fn forget(&mut self, ino: Inode, nlookups: u64) { - let evicted = self.composite.delegated_forget(ino, nlookups).await; - if evicted { - self.owner_inodes.remove(&ino); - } -} -``` - -**Step 5: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS (all 36 tests) - -**Step 6: Commit** - -```bash -git add src/fs/mescloud/org.rs -git commit -m "refactor: OrgFs now delegates to CompositeFs" -``` - ---- - -## Task 6: Remove code separators - -Per project conventions, remove the `// ------` section separators from `mod.rs` and `org.rs` while we're in these files. - -**Step 1: Remove separators** - -Delete all lines matching `// -----------` in `mod.rs` and `org.rs`. 
- -**Step 2: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/mod.rs src/fs/mescloud/org.rs -git commit -m "chore: remove section separator comments per project conventions" -``` - ---- - -## Summary of changes - -| File | Change | -|------|--------| -| `src/fs/mescloud/composite.rs` | **NEW** — `ChildSlot`, `CompositeFs` with all delegation methods | -| `src/fs/mescloud/common.rs` | Add `InodeCachePeek` trait | -| `src/fs/mescloud/mod.rs` | Replace `OrgSlot` + duplicated fields/methods with `CompositeFs`, thin `impl Fs` wrappers | -| `src/fs/mescloud/org.rs` | Replace `RepoSlot` + duplicated fields/methods with `CompositeFs`, thin `impl Fs` wrappers | -| `src/fs/mescloud/repo.rs` | Implement `InodeCachePeek`, remove `inode_table_get_attr` | - -**Estimated net LOC change:** Remove ~150-200 lines of duplicated delegation logic, add ~120 lines of `CompositeFs` (shared once). Net reduction ~30-80 lines with much less duplication. diff --git a/docs/plans/2026-02-10-file-table.md b/docs/plans/2026-02-10-file-table.md deleted file mode 100644 index 04ac28c..0000000 --- a/docs/plans/2026-02-10-file-table.md +++ /dev/null @@ -1,280 +0,0 @@ -# FileTable: Extract File Handle Management from ICache - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Extract file handle allocation into a dedicated `FileTable` type so that icaches are no longer responsible for file handle management. - -**Architecture:** Currently both `ICache` (sync) and `AsyncICache` (async) embed a monotonic file handle counter (`next_fh`). This couples inode caching with file handle allocation — two unrelated concerns. 
We introduce `FileTable`, a standalone atomic counter (mirroring `InodeFactory` for inodes), owned directly by each filesystem (`MesaFS`, `OrgFs`, `RepoFs`) rather than by the icache layer. - -**Tech Stack:** Rust, `std::sync::atomic::AtomicU64` - ---- - -### Task 1: Create `FileTable` type - -**Files:** -- Create: `src/fs/icache/file_table.rs` -- Modify: `src/fs/icache/mod.rs` - -**Step 1: Write the test** - -Add to `src/fs/icache/file_table.rs`: - -```rust -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn allocate_returns_monotonic_handles() { - let ft = FileTable::new(); - assert_eq!(ft.allocate(), 1); - assert_eq!(ft.allocate(), 2); - assert_eq!(ft.allocate(), 3); - } -} -``` - -**Step 2: Write the implementation** - -Create `src/fs/icache/file_table.rs`: - -```rust -use std::sync::atomic::{AtomicU64, Ordering}; - -use crate::fs::r#trait::FileHandle; - -/// Monotonically increasing file handle allocator. -pub struct FileTable { - next_fh: AtomicU64, -} - -impl FileTable { - pub fn new() -> Self { - Self { - next_fh: AtomicU64::new(1), - } - } - - pub fn allocate(&self) -> FileHandle { - self.next_fh.fetch_add(1, Ordering::Relaxed) - } -} -``` - -**Step 3: Register the module and export** - -In `src/fs/icache/mod.rs`, add: -- `mod file_table;` (private module, like `inode_factory`) -- `pub use file_table::FileTable;` - -**Step 4: Run tests to verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS — new type compiles and test passes. 
- -**Step 5: Commit** - -```bash -git add src/fs/icache/file_table.rs src/fs/icache/mod.rs -git commit -m "feat: add FileTable type for file handle allocation" -``` - ---- - -### Task 2: Remove `allocate_fh` from `AsyncICache` - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` - -**Step 1: Remove the `next_fh` field from the struct** - -Remove from `AsyncICache`: -```rust -next_fh: AtomicU64, -``` - -**Step 2: Remove `allocate_fh` method** - -Remove: -```rust -pub fn allocate_fh(&self) -> FileHandle { - self.next_fh.fetch_add(1, Ordering::Relaxed) -} -``` - -**Step 3: Remove `next_fh` initialization from `new()`** - -Remove from `AsyncICache::new()`: -```rust -next_fh: AtomicU64::new(1), -``` - -**Step 4: Remove the `allocate_fh_increments` test** - -Remove the entire test block. - -**Step 5: Clean up unused imports** - -Remove `AtomicU64` from `use std::sync::atomic::{AtomicU64, Ordering};` if only used in test code. Remove `FileHandle` from `use crate::fs::r#trait::{FileHandle, Inode};` if no longer used. - -**Do NOT run verify yet** — Tasks 2–5 must all land together. - ---- - -### Task 3: Remove `allocate_fh` from `ICache` (sync) - -**Files:** -- Modify: `src/fs/icache/cache.rs` - -**Step 1: Remove the `next_fh` field** - -Remove from `ICache`: -```rust -next_fh: FileHandle, -``` - -**Step 2: Remove `allocate_fh` method** - -Remove: -```rust -pub fn allocate_fh(&mut self) -> FileHandle { - let fh = self.next_fh; - self.next_fh += 1; - fh -} -``` - -**Step 3: Remove `next_fh` from constructor** - -Remove from `ICache::new()`: -```rust -next_fh: 1, -``` - -**Step 4: Clean up unused imports** - -Remove `FileHandle` from `use crate::fs::r#trait::{FileHandle, Inode};` if no longer needed. 
- -**Do NOT run verify yet.** - ---- - -### Task 4: Remove `allocate_fh` from `MescloudICache` - -**Files:** -- Modify: `src/fs/mescloud/icache.rs` - -**Step 1: Remove the `allocate_fh` delegation** - -Remove entirely: -```rust -pub fn allocate_fh(&self) -> FileHandle { - self.inner.allocate_fh() -} -``` - -**Step 2: Clean up unused imports** - -Remove `FileHandle` from the `use crate::fs::r#trait::` import if no longer used in this file. - -**Do NOT run verify yet.** - ---- - -### Task 5: Add `FileTable` to each filesystem - -**Files:** -- Modify: `src/fs/mescloud/mod.rs` (`MesaFS`) -- Modify: `src/fs/mescloud/org.rs` (`OrgFs`) -- Modify: `src/fs/mescloud/repo.rs` (`RepoFs`) - -**Step 1: `MesaFS` — add `FileTable` field** - -Add import: -```rust -use crate::fs::icache::FileTable; -``` - -Add field to `MesaFS`: -```rust -file_table: FileTable, -``` - -Initialize in `MesaFS::new()`: -```rust -file_table: FileTable::new(), -``` - -Change `alloc_fh` to use the file table: -```rust -fn alloc_fh(&mut self, slot_idx: usize, org_fh: FileHandle) -> FileHandle { - let fh = self.file_table.allocate(); - self.org_slots[slot_idx].bridge.insert_fh(fh, org_fh); - fh -} -``` - -**Step 2: `OrgFs` — add `FileTable` field** - -Add import: -```rust -use crate::fs::icache::FileTable; -``` - -Add field to `OrgFs`: -```rust -file_table: FileTable, -``` - -Initialize in `OrgFs::new()`: -```rust -file_table: FileTable::new(), -``` - -Change `alloc_fh` to use the file table: -```rust -fn alloc_fh(&mut self, slot_idx: usize, repo_fh: FileHandle) -> FileHandle { - let fh = self.file_table.allocate(); - self.repos[slot_idx].bridge.insert_fh(fh, repo_fh); - fh -} -``` - -**Step 3: `RepoFs` — add `FileTable` field** - -Add import: -```rust -use crate::fs::icache::FileTable; -``` - -Add field to `RepoFs`: -```rust -file_table: FileTable, -``` - -Initialize in `RepoFs::new()`: -```rust -file_table: FileTable::new(), -``` - -In `open()`, change: -```rust -let fh = self.icache.allocate_fh(); -``` 
-to: -```rust -let fh = self.file_table.allocate(); -``` - -**Step 4: Run the full verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All PASS. - -**Step 5: Commit** - -```bash -git add src/fs/icache/async_cache.rs src/fs/icache/cache.rs src/fs/mescloud/icache.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs -git commit -m "refactor: move file handle allocation from icaches to FileTable on each filesystem" -``` diff --git a/docs/plans/2026-02-10-pr31-async-icache-bug-review.md b/docs/plans/2026-02-10-pr31-async-icache-bug-review.md deleted file mode 100644 index 978ec49..0000000 --- a/docs/plans/2026-02-10-pr31-async-icache-bug-review.md +++ /dev/null @@ -1,526 +0,0 @@ -# PR #31 Async ICache Bug Review - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Fix bugs identified in the async icache PR before merge. - -**Architecture:** The PR replaces a synchronous `HashMap`-based `ICache` with `AsyncICache` built on `scc::HashMap` + `tokio::sync::watch` channels, introduces `CompositeFs` for shared FUSE delegation, and adds `IcbResolver` trait for async inode resolution. 
- -**Tech Stack:** Rust, tokio, scc::HashMap, FUSE (fuser crate) - ---- - -## Summary of Findings - -| # | Severity | Location | Description | -|---|----------|----------|-------------| -| 1 | Critical | `async_cache.rs:289-322` | `upsert_async` after lock drop can resurrect evicted entries | -| 2 | High | `async_cache.rs:133-160` | TOCTOU between `wait_for_available` and read in `get_icb`/`get_icb_mut` | -| 3 | High | `async_cache.rs:96-120` | `wait_for_available` doesn't loop on InFlight→Available→InFlight | -| 4 | High | `repo.rs:58` | `RepoResolver::resolve` panics on missing stub instead of returning error | -| 5 | Medium | `mod.rs:141-153`, `org.rs:200-215` | `inode_role` falls back to Root in release builds, hiding misrouted ops | -| 6 | Medium | `org.rs:126-144`, `mod.rs:179-196` | `ensure_owner_inode`/`ensure_org_inode` attr-missing path doesn't verify ICB exists | -| 7 | Medium | `async_cache.rs:444-451` | `for_each` uses `iter_sync` from async context (scc docs warn against this) | -| 8 | Medium | `Cargo.toml:47` | `reqwest-blocking-client` feature may deadlock in async context | -| 9 | Low | `repo.rs:268` | Redundant `cache_attr` after `get_or_resolve` in `RepoFs::lookup` | -| 10 | Low | `composite.rs:220-222` | `delegated_forget` iterates ALL slots instead of targeted removal | -| 11 | Low | `mod.rs:3` | Commented-out `local` module left behind | - -Notes on findings NOT included above: -- `ensure_child_ino` TOCTOU (duplicate inodes) — currently safe due to `&mut self` serialization on `Fs` trait. Worth a comment but not a live bug. -- `evict_zero_rc_children` non-atomic scan-then-forget — safe because `nlookups=0` makes the forget conditional on rc still being 0. -- `unreachable!()` in `forward_or_insert_inode` closures — safe due to `&mut self`, same reasoning. -- `delegated_forget` unconditionally propagating to inner FS — pre-existing design, correct by FUSE protocol invariant (inner/outer rc move in lockstep). 
-- `needs_resolve()` has no TTL for mutable refs — design concern for future, not a bug for fixed-ref mounts. -- `ensure_child_ino` O(n) scans — performance concern, not a correctness bug. -- `insert_icb` infinite loop on repeated resolution failures — liveness concern, unlikely in practice. - ---- - -### Task 1: Fix Critical — `upsert_async` resurrects evicted entries - -**Files:** -- Modify: `src/fs/icache/async_cache.rs:289-322` (stub resolution path) -- Modify: `src/fs/icache/async_cache.rs:330-349` (vacant path) - -**Problem:** In `get_or_resolve`, the code acquires the entry lock via `entry_async`, replaces `Available(stub)` with `InFlight(rx)`, then **drops the lock** before calling the resolver. After resolution, it writes back with `upsert_async`. Between the drop and the upsert, a concurrent `forget()` can evict the entry. `upsert_async` then **re-inserts** a dead inode — one the kernel has already forgotten. This is a reference count leak that persists until unmount. - -The same bug exists on the error path (lines 311-319): if `fallback.rc() > 0`, `upsert_async` restores the stub, but the entry may have been evicted during resolution. 
- -**Step 1: Write the failing test** - -Add to the test module in `async_cache.rs`: - -```rust -#[tokio::test] -async fn get_or_resolve_does_not_resurrect_evicted_entry() { - // Resolver that takes long enough for a concurrent forget to run - struct SlowResolver; - impl IcbResolver for SlowResolver { - type Icb = StubIcb; - async fn resolve( - &self, - _ino: Inode, - stub: Option, - _cache: &AsyncICache, - ) -> Result { - tokio::time::sleep(Duration::from_millis(50)).await; - Ok(StubIcb::new_resolved()) - } - } - - let cache = AsyncICache::new(SlowResolver, 1); - // Insert a stub that needs resolution, with rc=1 - cache.insert_icb(2, StubIcb::new_stub()).await; - cache.inc_rc(2); - - let resolve_handle = tokio::spawn({ - let cache_ref = &cache; // won't work directly — use Arc - async move { - cache_ref.get_or_resolve(2, |icb| icb.clone()).await - } - }); - - // Wait for resolution to start, then forget - tokio::time::sleep(Duration::from_millis(10)).await; - cache.forget(2, 1).await; // rc drops to 0, entry evicted - - // Resolution completes — should NOT resurrect the entry - let _ = resolve_handle.await; - assert!(!cache.contains(2), "evicted entry was resurrected"); -} -``` - -Note: this test will need to be adapted to the actual test infrastructure (Arc wrapping, proper StubIcb types). The key invariant being tested is: if an entry is evicted during resolution, the resolved value must not be re-inserted. - -**Step 2: Run test to verify it fails** - -Run: `cargo test --quiet -p git-fs get_or_resolve_does_not_resurrect` -Expected: FAIL — entry is resurrected because `upsert_async` unconditionally inserts. 
- -**Step 3: Implement the fix** - -Replace `upsert_async` with `entry_async` + conditional insert: - -```rust -// After resolver returns Ok(icb): -// Instead of: -// self.inode_table.upsert_async(ino, IcbState::Available(icb)).await; -// -// Use: -match self.inode_table.entry_async(ino).await { - Entry::Occupied(mut occ) => { - // Entry still exists (InFlight from our resolution) — update it - *occ.get_mut() = IcbState::Available(icb); - } - Entry::Vacant(_) => { - // Entry was evicted during resolution — do NOT resurrect - // The kernel has already forgotten this inode. - tracing::debug!(ino, "resolved inode was evicted during resolution, dropping result"); - } -} -``` - -Apply the same pattern to: -1. The error fallback path (lines 311-319) where `upsert_async` restores the stub -2. The vacant path (lines 340-345) where `upsert_async` stores the first resolution - -**Step 4: Run test to verify it passes** - -Run: `cargo test --quiet -p git-fs get_or_resolve_does_not_resurrect` -Expected: PASS - -**Step 5: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 6: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "fix(icache): prevent upsert_async from resurrecting evicted entries" -``` - ---- - -### Task 2: Fix High — `wait_for_available` should loop on re-encountering InFlight - -**Files:** -- Modify: `src/fs/icache/async_cache.rs:96-120` - -**Problem:** `wait_for_available` waits once on the watch channel. After waking, it re-reads the entry. If the entry transitioned InFlight→Available→InFlight (another resolution cycle started), the re-read finds InFlight and returns `false` — callers interpret this as "inode does not exist" when it actually does. 
- -**Step 1: Write the failing test** - -```rust -#[tokio::test] -async fn wait_for_available_retries_on_re_inflight() { - // A resolver that resolves quickly with a stub that still needs_resolve, - // causing a second InFlight cycle when get_or_resolve is called again - // ... (test setup that causes InFlight→Available(stub)→InFlight→Available(resolved)) - // Assert that get_icb returns Some, not None. -} -``` - -**Step 2: Run test to verify it fails** - -Run: `cargo test --quiet -p git-fs wait_for_available_retries` -Expected: FAIL - -**Step 3: Implement the fix** - -Wrap `wait_for_available` in a loop: - -```rust -async fn wait_for_available(&self, ino: Inode) -> bool { - loop { - let rx = self - .inode_table - .read_async(&ino, |_, s| match s { - IcbState::InFlight(rx) => Some(rx.clone()), - IcbState::Available(_) => None, - }) - .await; - - match rx { - None => return false, // key missing - Some(None) => return true, // Available - Some(Some(mut rx)) => { - // Wait for this InFlight to resolve - let _ = rx.changed().await; - // Loop back to re-check — entry might be InFlight again - // from a new resolution cycle, or might be removed - continue; - } - } - } -} -``` - -Also update `get_icb` and `get_icb_mut` to retry when they encounter InFlight after `wait_for_available`: - -```rust -pub async fn get_icb(&self, ino: Inode, f: impl Fn(&R::Icb) -> T) -> Option { - loop { - if !self.wait_for_available(ino).await { - return None; - } - let result = self - .inode_table - .read_async(&ino, |_, state| match state { - IcbState::Available(icb) => Some(f(icb)), - IcbState::InFlight(_) => None, // retry - }) - .await; - match result { - Some(Some(val)) => return Some(val), - Some(None) => continue, // was InFlight, retry - None => return None, // key missing - } - } -} -``` - -**Step 4: Run test to verify it passes** - -Run: `cargo test --quiet -p git-fs wait_for_available_retries` -Expected: PASS - -**Step 5: Run full test suite** - -Run: `cargo fmt --all && cargo 
clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 6: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "fix(icache): loop in wait_for_available and get_icb on re-encountering InFlight" -``` - ---- - -### Task 3: Fix High — `RepoResolver::resolve` panics on missing stub - -**Files:** -- Modify: `src/fs/mescloud/repo.rs:58` - -**Problem:** `RepoResolver::resolve` uses `unreachable!()` when `stub` is `None`. If any code path ever calls `get_or_resolve` for an inode that was never inserted as a stub, the process panics. This should return an error, not crash. - -**Step 1: Implement the fix** - -```rust -// Replace: -let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); - -// With: -let stub = stub.ok_or(LookupError::InodeNotFound)?; -``` - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "fix(repo): return error instead of panicking on missing stub in RepoResolver" -``` - ---- - -### Task 4: Fix Medium — `inode_role` falls back to Root in release builds - -**Files:** -- Modify: `src/fs/mescloud/mod.rs:141-153` (MesaFS) -- Modify: `src/fs/mescloud/org.rs:200-215` (OrgFs) - -**Problem:** When an inode can't be classified, `debug_assert!` fires (stripped in release) and the code falls back to `InodeRole::Root`. In release builds, an unclassifiable inode silently gets the root role — `readdir` on it returns the top-level listing, `lookup` tries to match org/repo names. This can happen if the kernel caches an inode past the 1-second TTL and calls getattr/readdir after the cache has forgotten it. 
- -**Step 1: Implement the fix** - -Change `inode_role` to return `Option`: - -```rust -fn inode_role(&self, ino: Inode) -> Option { - if ino == Self::ROOT_NODE_INO { - return Some(InodeRole::Root); - } - if self.composite.child_inodes.contains_key(&ino) { - return Some(InodeRole::OrgOwned); - } - if self.composite.slot_for_inode(ino).is_some() { - return Some(InodeRole::OrgOwned); - } - None -} -``` - -Update all callers to handle `None` by returning `ENOENT`: - -```rust -let role = self.inode_role(ino).ok_or(LookupError::InodeNotFound)?; -``` - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/mod.rs src/fs/mescloud/org.rs -git commit -m "fix(mescloud): return ENOENT for unclassifiable inodes instead of falling back to Root" -``` - ---- - -### Task 5: Fix Medium — `ensure_owner_inode`/`ensure_org_inode` attr-missing path doesn't verify ICB exists - -**Files:** -- Modify: `src/fs/mescloud/org.rs:126-144` -- Modify: `src/fs/mescloud/mod.rs:179-196` - -**Problem:** When the inode exists in the tracking map (`owner_inodes`/`child_inodes`) but the attr is missing from the icache, the code rebuilds the attr and calls `cache_attr`. But if the ICB was evicted entirely, `cache_attr` (which calls `get_icb_mut`) returns `None` and silently does nothing. The caller receives a stale attr with an inode number that the icache doesn't track, leading to subsequent `getattr` failures. 
- -**Step 1: Implement the fix** - -When the attr is missing AND `cache_attr` effectively no-ops, clean up the stale tracking entry and fall through to the allocation path: - -```rust -// In ensure_owner_inode: -for (&ino, existing_owner) in &self.owner_inodes { - if existing_owner == owner { - if let Some(attr) = self.composite.icache.get_attr(ino).await { - return (ino, attr); - } - // ICB may have been evicted — check if it still exists - if self.composite.icache.contains(ino) { - let now = SystemTime::now(); - let attr = FileAttr::Directory { /* ... */ }; - self.composite.icache.cache_attr(ino, attr).await; - return (ino, attr); - } - // ICB was evicted — fall through to allocate a new one - break; - } -} -// ... allocation path (also remove the stale entry from owner_inodes) -``` - -Note: the `for` loop borrows `self.owner_inodes` immutably, so the stale entry removal must happen after the loop. Use a separate `stale_ino` variable. - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/org.rs src/fs/mescloud/mod.rs -git commit -m "fix(mescloud): handle evicted ICBs in ensure_owner_inode/ensure_org_inode" -``` - ---- - -### Task 6: Fix Medium — `for_each` uses `iter_sync` from async context - -**Files:** -- Modify: `src/fs/icache/async_cache.rs:444-451` - -**Problem:** The `for_each` method uses `scc::HashMap::scan` (synchronous shard locks) from within `async fn` callers. The scc docs warn against mixing sync and async operations. On a single-threaded tokio runtime this could deadlock; on multi-threaded it causes contention. - -**Step 1: Implement the fix** - -Replace `for_each` with an async-safe alternative. 
Since `scc::HashMap` provides `scan_async`, use that: - -```rust -pub async fn for_each(&self, mut f: impl FnMut(&Inode, &R::Icb)) { - self.inode_table - .scan_async(|k, v| { - if let IcbState::Available(icb) = v { - f(k, icb); - } - }) - .await; -} -``` - -Check if `scan_async` is available in the version of `scc` being used. If not, document the requirement for a multi-threaded runtime in a comment. - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "fix(icache): use scan_async instead of iter_sync for for_each" -``` - ---- - -### Task 7: Fix Medium — `reqwest-blocking-client` may deadlock in async context - -**Files:** -- Modify: `Cargo.toml:47` - -**Problem:** The `reqwest-blocking-client` feature uses blocking HTTP, which can panic or deadlock inside a tokio runtime. The batch span exporter runs its own thread so it's likely fine, but the async client is safer. - -**Step 1: Implement the fix** - -```toml -# Replace: -opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-blocking-client"], optional = true } - -# With: -opentelemetry-otlp = { version = "0.29", features = ["http-proto", "trace", "reqwest-client"], optional = true } -``` - -**Step 2: Verify it compiles** - -Run: `cargo check --features __otlp_export` -Expected: Compiles without errors. If the OTLP exporter builder API differs for async reqwest, adjust the builder code in `trc.rs` accordingly. 
- -**Step 3: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 4: Commit** - -```bash -git add Cargo.toml Cargo.lock src/trc.rs -git commit -m "fix(deps): use async reqwest client for OTLP export to avoid blocking in tokio" -``` - ---- - -### Task 8: Fix Low — Redundant `cache_attr` after `get_or_resolve` in `RepoFs::lookup` - -**Files:** -- Modify: `src/fs/mescloud/repo.rs` (lookup method, around line 268) - -**Problem:** `get_or_resolve` already stores the resolved ICB (including attr) via `upsert_async`. The subsequent `cache_attr` reads the ICB back and writes the same attr — two unnecessary shard lock acquisitions. - -**Step 1: Implement the fix** - -Remove the redundant `cache_attr` call: - -```rust -// In RepoFs::lookup, remove this line: -self.icache.cache_attr(ino, attr).await; -``` - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "fix(repo): remove redundant cache_attr after get_or_resolve in lookup" -``` - ---- - -### Task 9: Fix Low — `delegated_forget` iterates ALL slots - -**Files:** -- Modify: `src/fs/mescloud/composite.rs:220-222` - -**Problem:** `delegated_forget` already identifies the correct slot via `slot_for_inode` (line 212) but then iterates all slots to remove the inode from bridges. This is wasteful. - -**Step 1: Implement the fix** - -```rust -// Replace: -for slot in &mut self.slots { - slot.bridge.remove_inode_by_left(ino); -} - -// With: -if let Some(idx) = self.slot_for_inode(ino) { - self.slots[idx].bridge.remove_inode_by_left(ino); -} -``` - -Wait — looking at the code again, `slot_for_inode` is already called at line 212 and the result is used for forwarding. The removal should use that same index. 
Check that `inode_to_slot` has already been updated before this point; if `inode_to_slot.remove` happens at line 219 before the bridge cleanup, we need to capture the index earlier. - -**Step 2: Run full test suite** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All pass - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/composite.rs -git commit -m "fix(composite): target single slot in delegated_forget instead of iterating all" -``` - ---- - -## Findings NOT requiring fixes (acknowledged, documented) - -These are design concerns or latent issues that are safe under current architecture (`&mut self` serialization on `Fs` trait): - -1. **`ensure_child_ino` TOCTOU** — safe due to `&mut self`. Add a `// SAFETY:` comment documenting why. -2. **`unreachable!()` in `forward_or_insert_inode`** — safe due to `&mut self`. Would become bugs if `Fs` changes to `&self`. -3. **`delegated_forget` unconditional propagation to inner FS** — correct by FUSE protocol invariant (inner/outer rc move in lockstep). Pre-existing design. -4. **`needs_resolve()` no TTL for mutable refs** — design concern for future. Not a bug for current fixed-ref mounts. -5. **O(n) linear scans in `ensure_child_ino` and `evict_zero_rc_children`** — performance concern for large repos. Worth optimizing with a parent→children index but not a correctness bug. -6. **`insert_icb` infinite loop on repeated failures** — liveness concern. In practice, resolution should eventually succeed. -7. **Commented-out `local` module** — cleanup task, not a bug. -8. **OTLP only in Ugly mode** — appears intentional. Confirm with author. -9. **`readdir` caches `size: 0` placeholder attrs** — standard FUSE pattern. Subsequent `lookup` triggers real resolution. -10. **`OrgResolver`/`MesaResolver` return `children: Some(vec![])`** — intentional. Org/Mesa layers manage children via `readdir`, not the resolver. 
diff --git a/docs/plans/2026-02-10-pr31-bug-review.md b/docs/plans/2026-02-10-pr31-bug-review.md deleted file mode 100644 index 601907e..0000000 --- a/docs/plans/2026-02-10-pr31-bug-review.md +++ /dev/null @@ -1,79 +0,0 @@ -# PR #31 Bug Review Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:dispatching-parallel-agents to execute this review plan. - -**Goal:** Find bugs in PR #31 (MES-710: Add an async icache) by reviewing every changed file, every domain of concern, and every critical code path. - -**Architecture:** Three-phase review: (1) per-file structural review, (2) cross-cutting domain review, (3) end-to-end code path review. Each phase uses parallel subagents. - -**Tech Stack:** Rust, tokio, scc::HashMap (lock-free concurrent map), FUSE (via fuser crate), watch channels for InFlight signaling. - ---- - -## Phase 1: Per-File Review (parallel agents) - -Each agent reads one file (final state on branch) and the diff, looking for bugs. - -### Agent 1.1: `async_cache.rs` (1210 lines — highest risk) -**File:** `src/fs/icache/async_cache.rs` -**Focus:** Race conditions in InFlight/Available state machine, deadlocks, lost wakeups, `unreachable!` panics, correctness of `forget`/`inc_rc`/`get_or_resolve`. - -### Agent 1.2: `composite.rs` (294 lines) -**File:** `src/fs/mescloud/composite.rs` -**Focus:** Inode/FH translation correctness, bridge cleanup on forget, `readdir_buf` lifetime, `unreachable!` in `forward_or_insert_inode` closures. - -### Agent 1.3: `icache.rs` (mescloud wrapper) -**File:** `src/fs/mescloud/icache.rs` -**Focus:** `needs_resolve()` logic, `evict_zero_rc_children` correctness (iterating while modifying), `ensure_child_ino` O(n) scan, `cache_attr` silent failure. - -### Agent 1.4: `mod.rs` (MesaFS) -**File:** `src/fs/mescloud/mod.rs` -**Focus:** `MesaResolver` always returning `Infallible`, `inode_role` fallback to `Root`, `ensure_org_inode` bridge reset, removed `debug_assert!`s. 
- -### Agent 1.5: `org.rs` (OrgFs) -**File:** `src/fs/mescloud/org.rs` -**Focus:** `OrgResolver`, `register_repo_slot` orphaned slot handling, `owner_inodes` cleanup on forget, github special casing. - -### Agent 1.6: `repo.rs` (RepoFs) -**File:** `src/fs/mescloud/repo.rs` -**Focus:** `RepoResolver::resolve` with `unreachable!` on missing stub, `build_repo_path` infinite loop potential, `readdir` calling `get_or_resolve` then caching attr again, `path_of_inode` duplication. - -### Agent 1.7: Small files (grouped) -**Files:** `src/fs/icache/file_table.rs`, `src/fs/icache/inode_factory.rs`, `src/fs/icache/mod.rs`, `src/fs/mescloud/common.rs` -**Focus:** Atomic ordering correctness (`Relaxed` for monotonic counters), `IcbLike` requiring `Clone`, error conversion completeness. - -### Agent 1.8: `trc.rs` + `fuser.rs` -**Files:** `src/trc.rs`, `src/fs/fuser.rs` -**Focus:** OTLP shutdown ordering, feature flag correctness, instrument name changes. - -## Phase 2: Domain Review (parallel agents) - -### Agent 2.1: Concurrency & Race Conditions -**Scope:** All files using `AsyncICache`, `scc::HashMap`, `watch` channels -**Focus:** TOCTOU between `wait_for_available` and subsequent `update_async`/`read_async`, ABA problems in InFlight→Available→InFlight transitions, `for_each` + concurrent mutation, deadlock scenarios with nested shard locks. - -### Agent 2.2: FUSE Ref-Counting Correctness -**Scope:** `inc_rc`, `forget`, `lookup` across all filesystem layers -**Focus:** Every `lookup` must `inc_rc` exactly once, every `forget` must propagate to inner FS, `inc_rc` returning `None` must fail the lookup (not silently proceed), ref-count leaks when errors occur after `inc_rc`. - -### Agent 2.3: Error Recovery & Cleanup -**Scope:** `get_or_resolve` error paths, `InFlight` cleanup on resolver failure -**Focus:** Is the `InFlight` entry always removed/restored on error? Does the `watch::Sender` always get dropped? What happens to waiters when resolution fails? 
Are there resource leaks? - -### Agent 2.4: CompositeFs Bridge Consistency -**Scope:** `composite.rs`, `mod.rs`, `org.rs` — all bridge operations -**Focus:** Are `child_inodes`, `inode_to_slot`, and bridge maps kept in sync? Does `delegated_forget` clean up all three? What about `readdir_buf` aliasing? - -## Phase 3: Code Path Review (parallel agents) - -### Agent 3.1: Lookup Path (FUSE → MesaFS → OrgFs → RepoFs) -**Trace:** `FuserAdapter::lookup` → `MesaFS::lookup` → `CompositeFs::delegated_lookup` → `OrgFs::lookup` → `CompositeFs::delegated_lookup` → `RepoFs::lookup` → `RepoResolver::resolve` -**Focus:** Are inodes properly translated at each boundary? Is `inc_rc` called exactly once at each layer? What happens if the inner lookup succeeds but `inc_rc` returns `None`? - -### Agent 3.2: Readdir Path -**Trace:** `FuserAdapter::readdir` → `MesaFS::readdir` → `CompositeFs::delegated_readdir` → `OrgFs::readdir` → `CompositeFs::delegated_readdir` → `RepoFs::readdir` -**Focus:** `readdir_buf` ownership and aliasing, `evict_zero_rc_children` TOCTOU with concurrent lookups, `translate_inner_ino` creating stubs that may conflict with concurrent resolvers. - -### Agent 3.3: Forget/Eviction Path -**Trace:** `FuserAdapter::forget` → `MesaFS::forget` → `CompositeFs::delegated_forget` → `OrgFs::forget` → `CompositeFs::delegated_forget` → `RepoFs::forget` -**Focus:** Does forget propagate correctly through all layers? Is the bridge cleaned up before or after the inner forget? Can forget race with a concurrent lookup that's incrementing rc? diff --git a/docs/plans/2026-02-10-readdir-icache-caching.md b/docs/plans/2026-02-10-readdir-icache-caching.md deleted file mode 100644 index 008a6e0..0000000 --- a/docs/plans/2026-02-10-readdir-icache-caching.md +++ /dev/null @@ -1,572 +0,0 @@ -# RepoFs readdir icache caching Implementation Plan - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. 
- -**Goal:** Make `RepoFs::readdir` read directory listings from the icache (via the resolver) instead of calling the mesa API on every invocation. - -**Architecture:** Add a `children` field to `InodeControlBlock` storing `Option>`. The `RepoResolver` populates this field when resolving directory inodes (it already calls the content API). `readdir` then calls `get_or_resolve` on the icache, which transparently invokes the resolver on cache miss. `needs_resolve()` is updated to return `true` for directory ICBs that lack children, ensuring directories get fully resolved on first access. For a fixed ref, directory contents are immutable, making this cache always valid. - -**Tech Stack:** Rust, tokio, scc (concurrent HashMap), mesa_dev SDK - ---- - -### Task 1: Add `children` field to `InodeControlBlock` - -**Files:** -- Modify: `src/fs/mescloud/icache.rs:1-39` - -**Step 1: Write a failing test for `needs_resolve()` on directory ICBs** - -Add a `#[cfg(test)]` module at the bottom of `src/fs/mescloud/icache.rs`: - -```rust -#[cfg(test)] -mod tests { - use super::*; - use crate::fs::r#trait::DirEntryType; - - fn dummy_dir_attr(ino: Inode) -> FileAttr { - let now = std::time::SystemTime::now(); - FileAttr::Directory { - common: make_common_file_attr(ino, 0o755, now, now, (0, 0), 4096), - } - } - - fn dummy_file_attr(ino: Inode) -> FileAttr { - let now = std::time::SystemTime::now(); - FileAttr::RegularFile { - common: make_common_file_attr(ino, 0o644, now, now, (0, 0), 4096), - size: 100, - blocks: 1, - } - } - - #[test] - fn needs_resolve_stub_returns_true() { - let icb = InodeControlBlock { - parent: Some(1), - rc: 0, - path: "stub".into(), - attr: None, - children: None, - }; - assert!(icb.needs_resolve()); - } - - #[test] - fn needs_resolve_file_with_attr_returns_false() { - let icb = InodeControlBlock { - parent: Some(1), - rc: 1, - path: "file.txt".into(), - attr: Some(dummy_file_attr(2)), - children: None, - }; - assert!(!icb.needs_resolve()); - } - - #[test] - fn 
needs_resolve_dir_without_children_returns_true() { - let icb = InodeControlBlock { - parent: Some(1), - rc: 1, - path: "dir".into(), - attr: Some(dummy_dir_attr(3)), - children: None, - }; - assert!(icb.needs_resolve()); - } - - #[test] - fn needs_resolve_dir_with_children_returns_false() { - let icb = InodeControlBlock { - parent: Some(1), - rc: 1, - path: "dir".into(), - attr: Some(dummy_dir_attr(3)), - children: Some(vec![ - ("README.md".to_owned(), DirEntryType::RegularFile), - ]), - }; - assert!(!icb.needs_resolve()); - } - - #[test] - fn needs_resolve_dir_with_empty_children_returns_false() { - let icb = InodeControlBlock { - parent: Some(1), - rc: 1, - path: "empty-dir".into(), - attr: Some(dummy_dir_attr(4)), - children: Some(vec![]), - }; - assert!(!icb.needs_resolve()); - } -} -``` - -**Step 2: Run test to verify it fails** - -Run: `cargo test --quiet -p git-fs --lib mescloud::icache::tests` -Expected: FAIL — `InodeControlBlock` doesn't have `children` field yet. - -**Step 3: Add `children` field and update `needs_resolve()`** - -In `src/fs/mescloud/icache.rs`, add the import for `DirEntryType`: - -```rust -use crate::fs::r#trait::{CommonFileAttr, DirEntryType, FileAttr, FilesystemStats, Inode, Permissions}; -``` - -Update the struct: - -```rust -pub struct InodeControlBlock { - pub parent: Option, - pub rc: u64, - pub path: std::path::PathBuf, - /// Cached file attributes from the last lookup. - pub attr: Option, - /// Cached directory children from the resolver (directories only). - pub children: Option>, -} -``` - -Update `new_root`: - -```rust -fn new_root(path: std::path::PathBuf) -> Self { - Self { - rc: 1, - parent: None, - path, - attr: None, - children: None, - } -} -``` - -Update `needs_resolve`: - -```rust -fn needs_resolve(&self) -> bool { - match self.attr { - None => true, - Some(FileAttr::Directory { .. 
}) => self.children.is_none(), - Some(_) => false, - } -} -``` - -**Step 4: Fix all `InodeControlBlock` construction sites** - -Every place that creates an `InodeControlBlock` literal must add `children: None` (or `children: Some(...)` where appropriate). These are all in `src/fs/mescloud/`: - -1. **`src/fs/mescloud/icache.rs:231`** — `ensure_child_ino` stub: - ```rust - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent), - attr: None, - children: None, - } - ``` - -2. **`src/fs/mescloud/mod.rs:62`** — `MesaResolver::resolve` stub fallback: - ```rust - let stub = stub.unwrap_or_else(|| InodeControlBlock { - parent: None, - path: "/".into(), - rc: 0, - attr: None, - children: None, - }); - ``` - -3. **`src/fs/mescloud/mod.rs:74`** — `MesaResolver::resolve` return (directories — set `children: Some(vec![])`): - ```rust - Ok(InodeControlBlock { - attr: Some(attr), - children: Some(vec![]), - ..stub - }) - ``` - -4. **`src/fs/mescloud/mod.rs:227`** — `MesaFS::ensure_org_inode` insert: - ```rust - InodeControlBlock { - rc: 0, - path: org_name.as_str().into(), - parent: Some(Self::ROOT_NODE_INO), - attr: None, - children: None, - } - ``` - -5. **`src/fs/mescloud/mod.rs:280`** — `MesaFS::translate_org_ino_to_mesa` factory: - ```rust - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_mesa_ino), - attr: None, - children: None, - } - ``` - -6. **`src/fs/mescloud/org.rs:51`** — `OrgResolver::resolve` stub fallback: - ```rust - let stub = stub.unwrap_or_else(|| InodeControlBlock { - parent: None, - path: "/".into(), - rc: 0, - attr: None, - children: None, - }); - ``` - -7. **`src/fs/mescloud/org.rs:63`** — `OrgResolver::resolve` return (directories — set `children: Some(vec![])`): - ```rust - Ok(InodeControlBlock { - attr: Some(attr), - children: Some(vec![]), - ..stub - }) - ``` - -8. 
**`src/fs/mescloud/org.rs:179-188`** — `OrgFs::ensure_owner_inode` insert: - ```rust - InodeControlBlock { - rc: 0, - path: owner.into(), - parent: Some(Self::ROOT_INO), - attr: None, - children: None, - } - ``` - -9. **`src/fs/mescloud/org.rs:325-334`** — `OrgFs::ensure_repo_inode` insert: - ```rust - InodeControlBlock { - rc: 0, - path: display_name.into(), - parent: Some(parent_ino), - attr: None, - children: None, - } - ``` - -10. **`src/fs/mescloud/org.rs:411`** — `OrgFs::translate_repo_ino_to_org` factory: - ```rust - InodeControlBlock { - rc: 0, - path: name.into(), - parent: Some(parent_org_ino), - attr: None, - children: None, - } - ``` - -11. **`src/fs/mescloud/repo.rs:99`** — `RepoResolver::resolve` return (will be updated in Task 2 — for now, add `children: None`): - ```rust - Ok(InodeControlBlock { - parent: stub.parent, - path: stub.path, - rc: stub.rc, - attr: Some(attr), - children: None, - }) - ``` - -**Step 5: Run tests to verify they pass** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: All tests PASS, including the new `needs_resolve` tests. 
- -**Step 6: Commit** - -```bash -git add src/fs/mescloud/icache.rs src/fs/mescloud/mod.rs src/fs/mescloud/org.rs src/fs/mescloud/repo.rs -git commit -m "feat: add children field to InodeControlBlock for directory caching" -``` - ---- - -### Task 2: Populate `children` in `RepoResolver` - -**Files:** -- Modify: `src/fs/mescloud/repo.rs:37-107` (the `RepoResolver::resolve` impl) - -**Step 1: Update `RepoResolver::resolve` to populate `children` for directories** - -In `src/fs/mescloud/repo.rs`, inside the `resolve` async block, after building `attr`, extract children from `Content::Dir`: - -```rust -async move { - let stub = stub.unwrap_or_else(|| unreachable!("RepoResolver requires a stub ICB")); - let file_path = build_repo_path(stub.parent, &stub.path, cache, RepoFs::ROOT_INO).await; - - let content = client - .org(&org_name) - .repos() - .at(&repo_name) - .content() - .get(Some(ref_.as_str()), file_path.as_deref(), None) - .await - .map_err(MesaApiError::from)?; - - let now = SystemTime::now(); - let attr = match &content { - Content::File(f) => { - let size = f.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: mescloud_icache::make_common_file_attr( - ino, 0o644, now, now, fs_owner, block_size, - ), - size, - blocks: mescloud_icache::blocks_of_size(block_size, size), - } - } - Content::Symlink(s) => { - let size = s.size.to_u64().unwrap_or(0); - FileAttr::RegularFile { - common: mescloud_icache::make_common_file_attr( - ino, 0o644, now, now, fs_owner, block_size, - ), - size, - blocks: mescloud_icache::blocks_of_size(block_size, size), - } - } - Content::Dir(_) => FileAttr::Directory { - common: mescloud_icache::make_common_file_attr( - ino, 0o755, now, now, fs_owner, block_size, - ), - }, - }; - - let children = match content { - Content::Dir(d) => Some( - d.entries - .into_iter() - .filter_map(|e| { - let (name, kind) = match e { - MesaDirEntry::File(f) => (f.name?, DirEntryType::RegularFile), - // TODO(MES-712): return DirEntryType::Symlink 
once readlink is wired up. - MesaDirEntry::Symlink(s) => (s.name?, DirEntryType::RegularFile), - MesaDirEntry::Dir(d) => (d.name?, DirEntryType::Directory), - }; - Some((name, kind)) - }) - .collect(), - ), - Content::File(_) | Content::Symlink(_) => None, - }; - - Ok(InodeControlBlock { - parent: stub.parent, - path: stub.path, - rc: stub.rc, - attr: Some(attr), - children, - }) -} -``` - -Note: The `match &content` (borrow) for `attr` must come before `match content` (move) for `children`. The existing code already borrows for `attr`, so this change only adds a second `match` that consumes `content`. - -**Step 2: Run tests to verify they pass** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS. This is a purely additive change — the resolver now populates `children` but nothing reads it yet. - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "feat: populate children in RepoResolver for directory inodes" -``` - ---- - -### Task 3: Add `From for ReadDirError` conversion - -**Files:** -- Modify: `src/fs/mescloud/common.rs:125-149` - -**Step 1: Write a failing test for the conversion** - -Add tests at the bottom of `src/fs/mescloud/common.rs`: - -```rust -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn lookup_inode_not_found_converts_to_readdir_inode_not_found() { - let err: ReadDirError = LookupError::InodeNotFound.into(); - assert!(matches!(err, ReadDirError::InodeNotFound)); - } - - #[test] - fn lookup_file_does_not_exist_converts_to_readdir_inode_not_found() { - let err: ReadDirError = LookupError::FileDoesNotExist.into(); - assert!(matches!(err, ReadDirError::InodeNotFound)); - } - - #[test] - fn lookup_remote_error_converts_to_readdir_remote_error() { - let api_err = MesaApiError::Response { - status: 500, - body: "test".to_owned(), - }; - let err: ReadDirError = LookupError::RemoteMesaError(api_err).into(); - assert!(matches!(err, 
ReadDirError::RemoteMesaError(_))); - } -} -``` - -**Step 2: Run test to verify it fails** - -Run: `cargo test --quiet -p git-fs --lib mescloud::common::tests` -Expected: FAIL — `From for ReadDirError` not implemented. - -**Step 3: Add the `From` impl** - -In `src/fs/mescloud/common.rs`, add after the `ReadDirError` definition (before `impl From for i32`): - -```rust -impl From for ReadDirError { - fn from(e: LookupError) -> Self { - match e { - LookupError::RemoteMesaError(api) => Self::RemoteMesaError(api), - LookupError::InodeNotFound | LookupError::FileDoesNotExist => Self::InodeNotFound, - } - } -} -``` - -**Step 4: Run tests to verify they pass** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS. - -**Step 5: Commit** - -```bash -git add src/fs/mescloud/common.rs -git commit -m "feat: add From for ReadDirError conversion" -``` - ---- - -### Task 4: Rewrite `RepoFs::readdir` to use icache - -**Files:** -- Modify: `src/fs/mescloud/repo.rs:260-350` (the `readdir` impl) - -**Step 1: Replace the direct API call with `get_or_resolve`** - -Replace the entire `readdir` method body in `src/fs/mescloud/repo.rs`: - -```rust -#[instrument(skip(self), fields(repo = %self.repo_name))] -async fn readdir(&mut self, ino: Inode) -> Result<&[DirEntry], ReadDirError> { - debug_assert!( - self.icache.contains(ino), - "readdir: inode {ino} not in inode table" - ); - debug_assert!( - matches!( - self.icache.get_attr(ino).await, - Some(FileAttr::Directory { .. }) | None - ), - "readdir: inode {ino} has non-directory cached attr" - ); - - let children = self - .icache - .get_or_resolve(ino, |icb| icb.children.clone()) - .await? 
- .ok_or(ReadDirError::NotADirectory)?; - - trace!(ino, count = children.len(), "readdir: resolved directory listing from icache"); - - let mut entries = Vec::with_capacity(children.len()); - for (name, kind) in &children { - let child_ino = self.icache.ensure_child_ino(ino, OsStr::new(name)).await; - let now = SystemTime::now(); - let attr = match kind { - DirEntryType::Directory => FileAttr::Directory { - common: mescloud_icache::make_common_file_attr( - child_ino, - 0o755, - now, - now, - self.icache.fs_owner(), - self.icache.block_size(), - ), - }, - DirEntryType::RegularFile - | DirEntryType::Symlink - | DirEntryType::CharDevice - | DirEntryType::BlockDevice - | DirEntryType::NamedPipe - | DirEntryType::Socket => FileAttr::RegularFile { - common: mescloud_icache::make_common_file_attr( - child_ino, - 0o644, - now, - now, - self.icache.fs_owner(), - self.icache.block_size(), - ), - size: 0, - blocks: 0, - }, - }; - self.icache.cache_attr(child_ino, attr).await; - entries.push(DirEntry { - ino: child_ino, - name: name.clone().into(), - kind: *kind, - }); - } - - self.readdir_buf = entries; - Ok(&self.readdir_buf) -} -``` - -Key differences from original: -- No `self.path_of_inode(ino)` call -- No `self.client.org(...).repos().at(...).content().get(...)` call -- Instead: `self.icache.get_or_resolve(ino, |icb| icb.children.clone())` reads cached children (resolver fetches on miss) -- Error type uses `?` with `From for ReadDirError` (from Task 3) -- The child inode allocation and attr caching loop is identical - -**Step 2: Run full verification** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS. Clippy may warn about unused imports — address in next step. - -**Step 3: Clean up unused imports if any** - -After the change, check whether any imports in `repo.rs` are now unused. 
The `readdir` method no longer uses: -- `self.client` in readdir (still used by `read`) -- `self.path_of_inode` in readdir (still used by `read`) - -All imports should still be needed since `read` uses `Content` and the resolver uses `MesaDirEntry`. Verify with clippy output. - -**Step 4: Run full verification again** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` -Expected: PASS with no warnings. - -**Step 5: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "feat: readdir reads from icache instead of querying API directly" -``` diff --git a/docs/plans/2026-02-10-resolver-as-data-provider.md b/docs/plans/2026-02-10-resolver-as-data-provider.md deleted file mode 100644 index 5d45571..0000000 --- a/docs/plans/2026-02-10-resolver-as-data-provider.md +++ /dev/null @@ -1,334 +0,0 @@ -# Consolidate Attr Creation Into Resolvers - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Eliminate manual `FileAttr` construction in `ensure_*` methods by delegating attr creation to the resolver via `get_or_resolve`, making the resolver the single source of truth for attribute data. - -**Architecture:** The async icache has a resolver-based state machine (`InFlight`/`Available`) where `get_or_resolve` is the canonical path for populating ICBs. Currently, `ensure_owner_inode`, `ensure_repo_inode`, and `ensure_org_inode` bypass this by manually constructing `FileAttr::Directory` and calling `cache_attr`. This refactoring makes them insert stubs (attr=None) and then call `get_or_resolve`, which triggers the resolver to produce the attr. The resolvers (`OrgResolver`, `MesaResolver`) already contain this exact logic. 
- -**Tech Stack:** Rust, tokio, scc::HashMap - ---- - -## Context - -### The Problem - -Three `ensure_*` methods duplicate attr construction that their resolvers already handle: - -| Method | File | Resolver | -|---|---|---| -| `ensure_owner_inode` | `src/fs/mescloud/org.rs:152` | `OrgResolver` | -| `ensure_repo_inode` | `src/fs/mescloud/org.rs:276` | `OrgResolver` | -| `ensure_org_inode` | `src/fs/mescloud/mod.rs:181` | `MesaResolver` | - -Each method manually constructs `FileAttr::Directory { common: make_common_file_attr(...) }` in **two places** (existing-entry-missing-attr fallback + new-entry creation), then calls `cache_attr`. The resolvers do the exact same construction. This is ~6 duplicated attr-construction sites. - -### The Fix - -Replace manual construction with the resolver flow: -1. `insert_icb(stub)` — creates entry with `attr: None` -2. `get_or_resolve(ino, |icb| icb.attr)` — resolver populates the attr - -The "existing entry with missing attr" defensive fallback also becomes unnecessary since `get_or_resolve` handles stubs (where `needs_resolve()` returns true) by calling the resolver. - -### What Stays The Same - -- `insert_icb` remains for stub creation (it's the correct way to seed the inode table with parent/path before resolution) -- `cache_attr` remains for cross-layer attr propagation (MesaFS ← OrgFs ← RepoFs via bridges) — this is NOT resolver data -- `ensure_child_ino` in `MescloudICache` stays unchanged (it creates stubs for the repo layer, resolved later by `RepoResolver`) -- `entry_or_insert_icb` stays unchanged (used by `translate_*` methods for bridge-level ICB mirroring) - ---- - -### Task 1: Change `OrgResolver::Error` to `Infallible` - -`OrgResolver::resolve` always returns `Ok(...)` — it synthesizes directory attrs from local data with no I/O. The error type `LookupError` is misleading. Changing to `Infallible` makes the guarantee explicit and aligns with `MesaResolver` which already uses `Infallible`. 
- -**Files:** -- Modify: `src/fs/mescloud/org.rs:35-69` (OrgResolver impl) - -**Step 1: Update the OrgResolver impl** - -Change the error type and return type: - -```rust -impl IcbResolver for OrgResolver { - type Icb = InodeControlBlock; - type Error = std::convert::Infallible; - - fn resolve( - &self, - ino: Inode, - stub: Option, - _cache: &AsyncICache, - ) -> impl Future> + Send - where - Self: Sized, - { - // ... body unchanged ... - } -} -``` - -**Step 2: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/org.rs -git commit -m "refactor: change OrgResolver::Error to Infallible" -``` - ---- - -### Task 2: Refactor `ensure_owner_inode` to use resolver - -**Files:** -- Modify: `src/fs/mescloud/org.rs:152-202` - -**Step 1: Replace the method body** - -The new structure: find-or-create the inode, then resolve through the canonical path. - -```rust -async fn ensure_owner_inode(&mut self, owner: &str) -> (Inode, FileAttr) { - let existing_ino = self - .owner_inodes - .iter() - .find_map(|(&ino, existing_owner)| (existing_owner == owner).then_some(ino)); - - let ino = if let Some(ino) = existing_ino { - ino - } else { - let ino = self.icache.allocate_inode(); - self.icache - .insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: owner.into(), - parent: Some(Self::ROOT_INO), - attr: None, - }, - ) - .await; - self.owner_inodes.insert(ino, owner.to_owned()); - ino - }; - - let attr = self - .icache - .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr")) - .await - .unwrap(); // OrgResolver is infallible - (ino, attr) -} -``` - -Key changes: -- Removed all manual `FileAttr::Directory` construction (was in 2 places) -- Removed the "attr missing → rebuild" defensive fallback — `get_or_resolve` handles this via the resolver -- Removed `use std::time::SystemTime` usage in this method (resolver handles it) -- Single 
`get_or_resolve` call covers both "existing with attr" (fast path) and "existing without attr" / "newly created stub" (resolver path) - -**Step 2: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/org.rs -git commit -m "refactor: ensure_owner_inode delegates attr creation to OrgResolver" -``` - ---- - -### Task 3: Refactor `ensure_repo_inode` to use resolver - -**Files:** -- Modify: `src/fs/mescloud/org.rs:276-361` - -**Step 1: Replace the method body** - -Same pattern. Side-effects (RepoFs creation, bridge setup) happen in the "allocate new" branch before resolution. - -```rust -async fn ensure_repo_inode( - &mut self, - repo_name: &str, - display_name: &str, - default_branch: &str, - parent_ino: Inode, -) -> (Inode, FileAttr) { - let repos = &self.repos; - let existing_ino = self - .repo_inodes - .iter() - .find_map(|(&ino, &idx)| (repos[idx].repo.repo_name() == repo_name).then_some(ino)); - - let ino = if let Some(ino) = existing_ino { - let rc = self.icache.get_icb(ino, |icb| icb.rc).await.unwrap_or(0); - trace!(ino, repo = repo_name, rc, "ensure_repo_inode: reusing"); - ino - } else { - let ino = self.icache.allocate_inode(); - trace!(ino, repo = repo_name, "ensure_repo_inode: allocated new inode"); - - self.icache - .insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: display_name.into(), - parent: Some(parent_ino), - attr: None, - }, - ) - .await; - - let repo = RepoFs::new( - self.client.clone(), - self.name.clone(), - repo_name.to_owned(), - default_branch.to_owned(), - self.icache.fs_owner(), - ); - - let mut bridge = HashMapBridge::new(); - bridge.insert_inode(ino, RepoFs::ROOT_INO); - - let idx = self.repos.len(); - self.repos.push(RepoSlot { repo, bridge }); - self.repo_inodes.insert(ino, idx); - ino - }; - - let attr = self - .icache - .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr")) - .await - 
.unwrap(); // OrgResolver is infallible - (ino, attr) -} -``` - -Key changes: -- Removed all manual `FileAttr::Directory` construction (was in 2 places) -- Removed the "attr missing → rebuilding" warn/fallback path -- Side-effects (RepoFs, bridge, repo_inodes) preserved in the else branch - -**Step 2: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/org.rs -git commit -m "refactor: ensure_repo_inode delegates attr creation to OrgResolver" -``` - ---- - -### Task 4: Refactor `ensure_org_inode` to use resolver - -**Files:** -- Modify: `src/fs/mescloud/mod.rs:181-253` - -**Step 1: Replace the method body** - -```rust -async fn ensure_org_inode(&mut self, org_idx: usize) -> (Inode, FileAttr) { - let existing_ino = self - .org_inodes - .iter() - .find(|&(_, &idx)| idx == org_idx) - .map(|(&ino, _)| ino); - - let ino = if let Some(ino) = existing_ino { - let rc = self - .icache - .get_icb(ino, |icb| icb.rc) - .await - .unwrap_or(0); - trace!(ino, org_idx, rc, "ensure_org_inode: reusing existing inode"); - ino - } else { - let org_name = self.org_slots[org_idx].org.name().to_owned(); - let ino = self.icache.allocate_inode(); - trace!(ino, org_idx, org = %org_name, "ensure_org_inode: allocated new inode"); - - self.icache - .insert_icb( - ino, - InodeControlBlock { - rc: 0, - path: org_name.as_str().into(), - parent: Some(Self::ROOT_NODE_INO), - attr: None, - }, - ) - .await; - - self.org_inodes.insert(ino, org_idx); - self.org_slots[org_idx] - .bridge - .insert_inode(ino, OrgFs::ROOT_INO); - ino - }; - - let attr = self - .icache - .get_or_resolve(ino, |icb| icb.attr.expect("resolved ICB must have attr")) - .await - .unwrap(); // MesaResolver is infallible - (ino, attr) -} -``` - -Key changes: -- Removed all manual `FileAttr::Directory` construction (was in 2 places) -- Removed the "attr missing → rebuilding" warn/fallback path -- Side-effects 
(org_inodes, bridge seeding) preserved - -**Step 2: Remove unused `SystemTime` import if no longer needed** - -Check if `SystemTime` is still used in `mod.rs`. It is used in `MesaResolver::resolve` and `MescloudICache::new`, so it stays. - -**Step 3: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -**Step 4: Commit** - -```bash -git add src/fs/mescloud/mod.rs -git commit -m "refactor: ensure_org_inode delegates attr creation to MesaResolver" -``` - ---- - -### Task 5: Remove unused imports - -**Files:** -- Modify: `src/fs/mescloud/org.rs` (check for unused `SystemTime`, `mescloud_icache` usage) -- Modify: `src/fs/mescloud/mod.rs` (same check) - -**Step 1: Check and remove unused imports** - -After the refactoring, check whether `SystemTime` and `mescloud_icache::make_common_file_attr` are still used in each file outside of the resolver. The resolvers still use them, so they likely stay. But verify with clippy. - -**Step 2: Verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -**Step 3: Commit (if changes needed)** - -```bash -git add src/fs/mescloud/org.rs src/fs/mescloud/mod.rs -git commit -m "chore: remove unused imports after resolver refactoring" -``` diff --git a/docs/plans/2026-02-10-split-async-contains.md b/docs/plans/2026-02-10-split-async-contains.md deleted file mode 100644 index 6ce0eaa..0000000 --- a/docs/plans/2026-02-10-split-async-contains.md +++ /dev/null @@ -1,287 +0,0 @@ -# Split AsyncICache::contains into Sync Methods - -> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. - -**Goal:** Replace the async `contains()` method with two non-async variants to eliminate debug/release control flow divergence caused by awaiting InFlight entries in debug_asserts. 
- -**Architecture:** The async `contains()` currently awaits InFlight entries (potentially blocking on network I/O), which changes control flow in debug vs release builds. We split it into `contains()` (sync, key exists in any state) and `contains_resolved()` (sync, key is Available). The internal `wait_for_available()` remains for other methods that genuinely need to await. - -**Tech Stack:** Rust, scc::HashMap (has `read_sync` and `contains_sync`), tokio - ---- - -### Task 1: Add sync `contains` and `contains_resolved` to `AsyncICache` - -**Files:** -- Modify: `src/fs/icache/async_cache.rs:119-122` - -**Step 1: Add the two new methods** - -Add these methods to the `impl AsyncICache` block, replacing the existing `pub async fn contains`: - -```rust - /// Check whether `ino` has an entry in the table (either `InFlight` or `Available`). - /// - /// This is a non-blocking, synchronous check. It does **not** wait for - /// `InFlight` entries to resolve. - pub fn contains(&self, ino: Inode) -> bool { - self.inode_table.contains_sync(&ino) - } - - /// Check whether `ino` is fully resolved (`Available`). - /// - /// Returns `false` if the entry is missing **or** still `InFlight`. - /// This is a non-blocking, synchronous check. - pub fn contains_resolved(&self, ino: Inode) -> bool { - self.inode_table - .read_sync(&ino, |_, s| matches!(s, IcbState::Available(_))) - .unwrap_or(false) - } -``` - -**Step 2: Run verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -This will fail because tests still call `cache.contains(42).await` on the now non-async method. That's expected — we fix the tests in Task 4. 
- -**Step 3: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "feat: add sync contains() and contains_resolved() to AsyncICache" -``` - ---- - -### Task 2: Update `MescloudICache` delegation - -**Files:** -- Modify: `src/fs/mescloud/icache.rs:101-105` - -**Step 1: Replace the async delegation with sync delegations** - -Replace: -```rust - pub async fn contains(&self, ino: Inode) -> bool { - self.inner.contains(ino).await - } -``` - -With: -```rust - pub fn contains(&self, ino: Inode) -> bool { - self.inner.contains(ino) - } - - pub fn contains_resolved(&self, ino: Inode) -> bool { - self.inner.contains_resolved(ino) - } -``` - -**Step 2: Run verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -This will still fail from tests — expected. - -**Step 3: Commit** - -```bash -git add src/fs/mescloud/icache.rs -git commit -m "feat: add sync contains/contains_resolved to MescloudICache" -``` - ---- - -### Task 3: Replace `debug_assert!` usages in `repo.rs` - -**Files:** -- Modify: `src/fs/mescloud/repo.rs` (lines 228-231, 260-263, 352, 450-453) - -**Step 1: Update the three debug_asserts** - -In `lookup` (line 228-231), replace: -```rust - debug_assert!( - self.icache.contains(parent).await, - "lookup: parent inode {parent} not in inode table" - ); -``` -With: -```rust - debug_assert!( - self.icache.contains(parent), - "lookup: parent inode {parent} not in inode table" - ); -``` - -In `readdir` (line 260-263), replace: -```rust - debug_assert!( - self.icache.contains(ino).await, - "readdir: inode {ino} not in inode table" - ); -``` -With: -```rust - debug_assert!( - self.icache.contains(ino), - "readdir: inode {ino} not in inode table" - ); -``` - -In `forget` (line 450-453), replace: -```rust - debug_assert!( - self.icache.contains(ino).await, - "forget: inode {ino} not in inode table" - ); -``` -With: -```rust - debug_assert!( - self.icache.contains(ino), - "forget: inode {ino} not 
in inode table" - ); -``` - -**Step 2: Update the `open` guard** - -In `open` (line 352), replace: -```rust - if !self.icache.contains(ino).await { -``` -With: -```rust - if !self.icache.contains(ino) { -``` - -The `open` method doesn't read ICB data — it only allocates a file handle. Subsequent `read` calls go through `path_of_inode` → `get_icb` which properly awaits InFlight entries. FUSE guarantees the inode was previously looked up, so it must be in the table (InFlight or Available). - -**Step 3: Run verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -Still expect test failures from `async_cache.rs` tests. - -**Step 4: Commit** - -```bash -git add src/fs/mescloud/repo.rs -git commit -m "fix: use sync contains() in debug_asserts and open guard" -``` - ---- - -### Task 4: Update `async_cache.rs` tests - -**Files:** -- Modify: `src/fs/icache/async_cache.rs` (test module, lines 460+) - -**Step 1: Update tests that used `contains(...).await`** - -The following test assertions need their `.await` removed since `contains()` is now sync: - -`contains_returns_true_for_root` (line 463): -```rust -assert!(cache.contains(1), "root should exist"); -``` - -`contains_returns_false_for_missing` (line 469): -```rust -assert!(!cache.contains(999), "missing inode should not exist"); -``` - -`contains_after_resolver_completes` (line 493): -```rust -assert!(cache.contains(42), "should be true after resolve"); -``` - -`insert_icb_adds_entry` (line 564): -```rust -assert!(cache.contains(42), "inserted entry should exist"); -``` - -`forget_evicts_when_rc_drops_to_zero` (line 700): -```rust -assert!(!cache.contains(42), "evicted entry should be gone"); -``` - -`wait_does_not_miss_signal_on_immediate_complete` (lines 795-801): This test exercises the awaiting behavior of the old `contains`. It should now test `contains_resolved` or be refactored. 
Replace: -```rust - let result = - tokio::time::timeout(std::time::Duration::from_millis(100), cache.contains(42)).await; - assert_eq!( - result, - Ok(true), - "should not hang on already-completed entry" - ); -``` -With: -```rust - assert!(cache.contains(42), "entry should exist in table"); - assert!( - cache.contains_resolved(42), - "should be resolved after insert_icb overwrote InFlight" - ); -``` - -`get_or_resolve_resolves_missing` (line 840): -```rust -assert!(cache.contains(42)); -``` - -`get_or_resolve_propagates_error` (line 853): -```rust -assert!(!cache.contains(42)); -``` - -**Step 2: Add dedicated tests for `contains_resolved`** - -Add after the existing `contains_returns_false_for_missing` test: - -```rust - #[tokio::test] - async fn contains_resolved_returns_true_for_root() { - let cache = test_cache(); - assert!(cache.contains_resolved(1), "root should be resolved"); - } - - #[tokio::test] - async fn contains_resolved_returns_false_for_missing() { - let cache = test_cache(); - assert!( - !cache.contains_resolved(999), - "missing inode should not be resolved" - ); - } - - #[tokio::test] - async fn contains_resolved_returns_false_for_inflight() { - let cache = test_cache(); - let (_tx, rx) = watch::channel(()); - cache - .inode_table - .upsert_async(42, IcbState::InFlight(rx)) - .await; - assert!(cache.contains(42), "InFlight entry should exist"); - assert!( - !cache.contains_resolved(42), - "InFlight entry should not be resolved" - ); - } -``` - -**Step 3: Run verify** - -Run: `cargo fmt --all && cargo clippy --all-targets --all-features -- -D warnings && cargo test --quiet` - -Expected: ALL PASS - -**Step 4: Commit** - -```bash -git add src/fs/icache/async_cache.rs -git commit -m "test: update tests for sync contains/contains_resolved" -```