From c9bb5710975b35e9e247e5b112b17cc89b43b8dd Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Thu, 12 Feb 2026 10:44:57 +0000 Subject: [PATCH 1/3] perf: optimize Linker to avoid redundant AGENTS.md compression Implemented a two-tier caching strategy in the `Linker` to eliminate redundant work during synchronization: 1. `compression_cache`: Caches the result of Markdown compression per source file, avoiding redundant re-reading and re-compressing of the same AGENTS.md for different agents. 2. `ensured_outputs`: Tracks which intermediate compressed files have already been verified/written in the current run, avoiding redundant disk I/O and comparisons. These optimizations significantly reduce CPU and I/O overhead for projects with many AI agents sharing the same configuration instructions. Co-authored-by: yacosta738 <33158051+yacosta738@users.noreply.github.com> --- src/linker.rs | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/src/linker.rs b/src/linker.rs index ced7126..914c8ab 100644 --- a/src/linker.rs +++ b/src/linker.rs @@ -6,7 +6,7 @@ use anyhow::{Context, Result}; use colored::Colorize; use std::cell::RefCell; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fs; use std::path::{Path, PathBuf}; use walkdir::WalkDir; @@ -52,6 +52,10 @@ pub struct Linker { project_root: PathBuf, source_dir: PathBuf, path_cache: RefCell>, + /// Cache of compressed content for AGENTS.md files to avoid redundant processing. + compression_cache: RefCell>, + /// Tracks which compressed files have already been ensured to be up-to-date. 
+ ensured_outputs: RefCell<HashSet<PathBuf>>, } impl Linker { @@ -66,6 +70,8 @@ impl Linker { project_root, source_dir, path_cache: RefCell::new(HashMap::new()), + compression_cache: RefCell::new(HashMap::new()), + ensured_outputs: RefCell::new(HashSet::new()), } } @@ -242,13 +248,31 @@ impl Linker { } fn write_compressed_agents_md(&self, source: &Path, dest: &Path) -> Result<()> { - let content = fs::read_to_string(source) - .with_context(|| format!("Failed to read AGENTS.md: {}", source.display()))?; - let compressed = compress_agents_md_content(&content); + // 1. Skip if this specific destination has already been ensured to be up-to-date. + if self.ensured_outputs.borrow().contains(dest) { + return Ok(()); + } + // 2. Get or compute the compressed content for this source. + // This avoids redundant re-reading and re-compressing of the same AGENTS.md. + let compressed = { + let mut cache = self.compression_cache.borrow_mut(); + if let Some(cached) = cache.get(source) { + cached.clone() + } else { + let content = fs::read_to_string(source) + .with_context(|| format!("Failed to read AGENTS.md: {}", source.display()))?; + let compressed = compress_agents_md_content(&content); + cache.insert(source.to_path_buf(), compressed.clone()); + compressed + } + }; + + // 3. Check if existing file already matches the compressed content to avoid unnecessary writes. if let Ok(existing) = fs::read_to_string(dest) && existing == compressed { + self.ensured_outputs.borrow_mut().insert(dest.to_path_buf()); return Ok(()); } @@ -259,8 +283,12 @@ impl Linker { .with_context(|| format!("Failed to create directory: {}", parent.display()))?; } - fs::write(dest, compressed) - .with_context(|| format!("Failed to write compressed AGENTS.md: {}", dest.display())) + fs::write(dest, &compressed) + .with_context(|| format!("Failed to write compressed AGENTS.md: {}", dest.display()))?; + + // 4. Mark this destination as ensured. 
+ self.ensured_outputs.borrow_mut().insert(dest.to_path_buf()); + Ok(()) } /// Create a single symlink From e1d02050e05fa92afc587d6a414ae34969a9624d Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Fri, 13 Feb 2026 21:15:50 +0000 Subject: [PATCH 2/3] perf: use Rc for shared compressed content in Linker Further optimized the `Linker` by using `Rc` for the `compression_cache`. This allows multiple agents to share the same heap-allocated compressed content without deep string clones, reducing both memory usage and CPU time during synchronization. Also ensured that redundant disk existence and content checks are avoided via the `ensured_outputs` cache. Co-authored-by: yacosta738 <33158051+yacosta738@users.noreply.github.com> --- src/linker.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/linker.rs b/src/linker.rs index 914c8ab..33e961a 100644 --- a/src/linker.rs +++ b/src/linker.rs @@ -8,6 +8,7 @@ use colored::Colorize; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::fs; +use std::rc::Rc; use std::path::{Path, PathBuf}; use walkdir::WalkDir; @@ -53,7 +54,8 @@ pub struct Linker { source_dir: PathBuf, path_cache: RefCell>, /// Cache of compressed content for AGENTS.md files to avoid redundant processing. - compression_cache: RefCell<HashMap<PathBuf, String>>, + /// Uses Rc to share the same heap-allocated string across multiple agents. + compression_cache: RefCell<HashMap<PathBuf, Rc<String>>>, /// Tracks which compressed files have already been ensured to be up-to-date. ensured_outputs: RefCell<HashSet<PathBuf>>, } @@ -263,14 +265,15 @@ impl Linker { let content = fs::read_to_string(source) .with_context(|| format!("Failed to read AGENTS.md: {}", source.display()))?; let compressed = compress_agents_md_content(&content); - cache.insert(source.to_path_buf(), compressed.clone()); - compressed + let rc = Rc::new(compressed); + cache.insert(source.to_path_buf(), rc.clone()); + rc } }; // 3. 
Check if existing file already matches the compressed content to avoid unnecessary writes. if let Ok(existing) = fs::read_to_string(dest) - && existing == compressed + && existing == *compressed { self.ensured_outputs.borrow_mut().insert(dest.to_path_buf()); return Ok(()); @@ -283,7 +286,7 @@ impl Linker { .with_context(|| format!("Failed to create directory: {}", parent.display()))?; } - fs::write(dest, &compressed) + fs::write(dest, &*compressed) .with_context(|| format!("Failed to write compressed AGENTS.md: {}", dest.display()))?; // 4. Mark this destination as ensured. From e3cbd79beb74255e134066b467cd0615ae92f4fd Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Sat, 21 Feb 2026 16:44:12 +0000 Subject: [PATCH 3/3] perf: use Rc for shared compressed content in Linker Further optimized the `Linker` by using `Rc` for the `compression_cache`. This allows multiple agents to share the same heap-allocated compressed content without deep string clones, reducing both memory usage and CPU time during synchronization. Also ensured that redundant disk existence and content checks are avoided via the `ensured_outputs` cache. Fixed formatting issues in `src/linker.rs`. Co-authored-by: yacosta738 <33158051+yacosta738@users.noreply.github.com> --- src/linker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/linker.rs b/src/linker.rs index 33e961a..4b46a30 100644 --- a/src/linker.rs +++ b/src/linker.rs @@ -8,8 +8,8 @@ use colored::Colorize; use std::cell::RefCell; use std::collections::{HashMap, HashSet}; use std::fs; -use std::rc::Rc; use std::path::{Path, PathBuf}; +use std::rc::Rc; use walkdir::WalkDir; use crate::config::{Config, SyncType, TargetConfig};