From df9815bf560bd190c7b8bcdaff369ca42f893a44 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Sun, 8 Mar 2026 20:49:11 +0800 Subject: [PATCH 01/20] Bump cortex-mem crates from 2.5.0 to 2.5.1 --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 945837f..43c9064 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -574,7 +574,7 @@ dependencies = [ [[package]] name = "cortex-mem-cli" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "chrono", @@ -592,7 +592,7 @@ dependencies = [ [[package]] name = "cortex-mem-config" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "directories 5.0.1", @@ -603,7 +603,7 @@ dependencies = [ [[package]] name = "cortex-mem-core" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "async-trait", @@ -632,7 +632,7 @@ dependencies = [ [[package]] name = "cortex-mem-mcp" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "async-trait", @@ -655,7 +655,7 @@ dependencies = [ [[package]] name = "cortex-mem-rig" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "async-trait", @@ -671,7 +671,7 @@ dependencies = [ [[package]] name = "cortex-mem-service" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "axum", @@ -694,7 +694,7 @@ dependencies = [ [[package]] name = "cortex-mem-tars" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "async-stream", @@ -734,7 +734,7 @@ dependencies = [ [[package]] name = "cortex-mem-tools" -version = "2.5.0" +version = "2.5.1" dependencies = [ "anyhow", "async-trait", From 91f3eca37d81625f0551247b85170e1abe915855 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Sun, 8 Mar 2026 21:55:30 +0800 Subject: [PATCH 02/20] Remove automation config from TOML files and update docs The automation configuration has been moved from TOML files to code-level configuration through the AutomationConfig struct. 
Updated documentation to reflect the new configuration approach and added parameters for max_concurrent_llm_tasks and generate_layers_every_n_messages. --- README.md | 9 --- README_zh.md | 9 --- cortex-mem-core/README.md | 2 + litho.docs/en/3.Workflow.md | 7 +- .../Automation Management Domain.md | 75 ++++++++----------- ...70\345\277\203\346\265\201\347\250\213.md" | 7 +- ...41\347\220\206\351\242\206\345\237\237.md" | 63 ++++++++-------- 7 files changed, 76 insertions(+), 96 deletions(-) diff --git a/README.md b/README.md index 990ca16..51de240 100644 --- a/README.md +++ b/README.md @@ -448,15 +448,6 @@ timeout_secs = 30 # Timeout for embedding requests # ----------------------------------------------------------------------------- [cortex] data_dir = "./cortex-data" # Directory for storing memory files and sessions - -# ----------------------------------------------------------------------------- -# Automation Configuration -# ----------------------------------------------------------------------------- -[automation] -auto_index = true # Enable automatic indexing on file changes -auto_extract = true # Enable automatic extraction on session close -index_interval_secs = 5 # Polling interval for file watcher -batch_delay_secs = 2 # Delay before processing batched changes ``` # 🚀 Usage diff --git a/README_zh.md b/README_zh.md index ce9fcf5..0afba2e 100644 --- a/README_zh.md +++ b/README_zh.md @@ -450,15 +450,6 @@ timeout_secs = 30 # 嵌入请求的超时时间 # ----------------------------------------------------------------------------- [cortex] data_dir = "./cortex-data" # 用于存储内存文件和会话的目录 - -# ----------------------------------------------------------------------------- -# 自动化配置 -# ----------------------------------------------------------------------------- -[automation] -auto_index = true # 在文件更改时启用自动索引 -auto_extract = true # 在会话关闭时启用自动提取 -index_interval_secs = 5 # 文件监视器的轮询间隔 -batch_delay_secs = 2 # 处理批量更改前的延迟 ``` # 🚀 使用方法 diff --git a/cortex-mem-core/README.md 
b/cortex-mem-core/README.md index 6a97a67..5b21fb4 100644 --- a/cortex-mem-core/README.md +++ b/cortex-mem-core/README.md @@ -394,6 +394,8 @@ pub struct AutomationConfig { pub index_on_close: bool, // Default: true pub index_batch_delay: u64, // Default: 2 seconds pub auto_generate_layers_on_startup: bool, // Default: false + pub generate_layers_every_n_messages: usize, // Default: 0 (disabled) + pub max_concurrent_llm_tasks: usize, // Default: 3 } ``` diff --git a/litho.docs/en/3.Workflow.md b/litho.docs/en/3.Workflow.md index 0266bf1..d61ae02 100644 --- a/litho.docs/en/3.Workflow.md +++ b/litho.docs/en/3.Workflow.md @@ -252,10 +252,11 @@ flowchart TD ```rust AutomationConfig { auto_index: true, - index_on_message: true, // ✅ Immediate indexing - index_on_close: true, // ✅ Session close triggers L0/L1 generation + indexing + index_on_message: true, // Immediate indexing + index_on_close: true, // Session close triggers L0/L1 generation + indexing index_batch_delay: 1, - generate_layers_every_n_messages: 5, // ✅ Periodic L0/L1 generation + generate_layers_every_n_messages: 5, // Periodic L0/L1 generation + max_concurrent_llm_tasks: 3, // Concurrent LLM task limit } ``` diff --git a/litho.docs/en/4.Deep-Exploration/Automation Management Domain.md b/litho.docs/en/4.Deep-Exploration/Automation Management Domain.md index 0fdd898..8e134f5 100644 --- a/litho.docs/en/4.Deep-Exploration/Automation Management Domain.md +++ b/litho.docs/en/4.Deep-Exploration/Automation Management Domain.md @@ -101,18 +101,19 @@ graph TB **Configuration Dependencies**: ```rust pub struct AutomationConfig { - pub auto_index: bool, // Enable automatic indexing - pub auto_extract: bool, // Enable memory extraction on session close - pub index_on_message: bool, // Index on message add (default: true) + pub auto_index: bool, // Enable automatic indexing (default: true) + pub auto_extract: bool, // Enable memory extraction on session close (default: true) + pub index_on_message: bool, // Index on 
message add (default: false) pub index_on_close: bool, // Index on session close (default: true) - pub poll_interval_secs: u64, // Filesystem polling frequency (default: 5) - pub batch_delay_secs: u64, // Batching window for indexing (default: 2) - pub sync_on_startup: bool, // Run full sync on system start + pub index_batch_delay: u64, // Batching window seconds for indexing (default: 2) pub auto_generate_layers_on_startup: bool, // Generate L0/L1 on startup (default: false) - pub generate_layers_every_n_messages: usize, // Periodic L0/L1 generation (0 = disabled) + pub generate_layers_every_n_messages: usize, // Periodic L0/L1 generation (default: 0, disabled) + pub max_concurrent_llm_tasks: usize, // Max concurrent LLM tasks (default: 3) } ``` +> **Note**: `AutomationConfig` is a code-level configuration, set programmatically when initializing `AutomationManager`, not loaded from TOML configuration files. This allows integrators to flexibly adjust automation behavior according to their needs. + ### 3.2 File System Watcher (`automation/watcher.rs`) **Responsibility**: Monitors `cortex://session` directories for changes in conversation files (`.md` format) using a polling mechanism. @@ -302,44 +303,34 @@ sequenceDiagram ### 5.1 Configuration Schema -Automation behavior is controlled via the `[automation]` section in `cortex-mem.toml`: - -```toml -[automation] -# Enable automatic indexing of new files -auto_index = true - -# Enable automatic extraction when sessions close -auto_extract = true - -# Index immediately on message add (recommended for real-time search) -index_on_message = true - -# Generate L0/L1 and index on session close -index_on_close = true - -# Filesystem polling interval in seconds -poll_interval_secs = 5 - -# Batch processing delay to group rapid changes -batch_delay_secs = 2 +Automation behavior is configured through the `AutomationConfig` struct in code. 
Here is a typical configuration example: -# Perform full sync on startup -sync_on_startup = false - -# Generate missing L0/L1 layers on startup (can cause startup delay) -auto_generate_layers_on_startup = false - -# Generate L0/L1 every N messages (0 = disabled) -generate_layers_every_n_messages = 5 - -# Maximum concurrent indexing operations -max_concurrent_indexes = 10 - -# Extraction confidence threshold (0.0 - 1.0) -extraction_confidence_threshold = 0.7 +```rust +let automation_config = AutomationConfig { + auto_index: true, + auto_extract: true, + index_on_message: false, // Performance consideration, default off + index_on_close: true, // Batch index on session close + index_batch_delay: 2, // Batch processing delay (seconds) + auto_generate_layers_on_startup: false, // Avoid startup blocking + generate_layers_every_n_messages: 5, // Generate L0/L1 every 5 messages + max_concurrent_llm_tasks: 3, // Limit concurrent LLM calls +}; ``` +**Configuration Parameters**: + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `auto_index` | `true` | Enable automatic vector indexing | +| `auto_extract` | `true` | Enable memory extraction on session close | +| `index_on_message` | `false` | Real-time indexing on message add (recommended off for performance) | +| `index_on_close` | `true` | Batch indexing on session close | +| `index_batch_delay` | `2` | Batch delay seconds to group rapid changes | +| `auto_generate_layers_on_startup` | `false` | Generate missing L0/L1 files on startup | +| `generate_layers_every_n_messages` | `0` | Trigger L0/L1 generation every N messages (0=disabled) | +| `max_concurrent_llm_tasks` | `3` | Maximum concurrent LLM tasks to prevent API overload | + ### 5.2 Multi-Tenant Considerations In multi-tenant deployments: diff --git "a/litho.docs/zh/3\343\200\201\346\240\270\345\277\203\346\265\201\347\250\213.md" "b/litho.docs/zh/3\343\200\201\346\240\270\345\277\203\346\265\201\347\250\213.md" index 8bb1dd5..f1f5d25 
100644 --- "a/litho.docs/zh/3\343\200\201\346\240\270\345\277\203\346\265\201\347\250\213.md" +++ "b/litho.docs/zh/3\343\200\201\346\240\270\345\277\203\346\265\201\347\250\213.md" @@ -252,10 +252,11 @@ flowchart TD ```rust AutomationConfig { auto_index: true, - index_on_message: true, // ✅ 立即索引 - index_on_close: true, // ✅ 会话关闭时生成L0/L1并索引 + index_on_message: true, // 立即索引 + index_on_close: true, // 会话关闭时生成L0/L1并索引 index_batch_delay: 1, - generate_layers_every_n_messages: 5, // ✅ 定期L0/L1生成 + generate_layers_every_n_messages: 5, // 定期L0/L1生成 + max_concurrent_llm_tasks: 3, // 并发LLM任务限制 } ``` diff --git "a/litho.docs/zh/4\343\200\201\346\267\261\345\205\245\346\216\242\347\264\242/\350\207\252\345\212\250\345\214\226\347\256\241\347\220\206\351\242\206\345\237\237.md" "b/litho.docs/zh/4\343\200\201\346\267\261\345\205\245\346\216\242\347\264\242/\350\207\252\345\212\250\345\214\226\347\256\241\347\220\206\351\242\206\345\237\237.md" index fad5323..6fac7b5 100644 --- "a/litho.docs/zh/4\343\200\201\346\267\261\345\205\245\346\216\242\347\264\242/\350\207\252\345\212\250\345\214\226\347\256\241\347\220\206\351\242\206\345\237\237.md" +++ "b/litho.docs/zh/4\343\200\201\346\267\261\345\205\245\346\216\242\347\264\242/\350\207\252\345\212\250\345\214\226\347\256\241\347\220\206\351\242\206\345\237\237.md" @@ -102,18 +102,19 @@ graph TB **配置依赖**: ```rust pub struct AutomationConfig { - pub auto_index: bool, // 启用自动索引 - pub auto_extract: bool, // 启用会话关闭时记忆提取 - pub index_on_message: bool, // 消息添加时索引(默认:true) + pub auto_index: bool, // 启用自动索引(默认:true) + pub auto_extract: bool, // 启用会话关闭时记忆提取(默认:true) + pub index_on_message: bool, // 消息添加时索引(默认:false) pub index_on_close: bool, // 会话关闭时索引(默认:true) - pub poll_interval_secs: u64, // 文件系统轮询频率(默认:5) - pub batch_delay_secs: u64, // 索引批量窗口(默认:2) - pub sync_on_startup: bool, // 系统启动时运行完整同步 + pub index_batch_delay: u64, // 索引批量窗口秒数(默认:2) pub auto_generate_layers_on_startup: bool, // 启动时生成L0/L1(默认:false) - pub 
generate_layers_every_n_messages: usize, // 定期L0/L1生成(0 = 禁用) + pub generate_layers_every_n_messages: usize, // 定期L0/L1生成(默认:0,禁用) + pub max_concurrent_llm_tasks: usize, // 最大并发LLM任务数(默认:3) } ``` +> **注意**: `AutomationConfig` 是代码级配置,在初始化 `AutomationManager` 时通过编程方式设置,而非通过 TOML 配置文件加载。这允许集成方根据自身需求灵活调整自动化行为。 + ### 3.2 文件系统监视器 (`automation/watcher.rs`) **职责**: 使用轮询机制监视`cortex://session`目录中对话文件(`.md`格式)的变化。 @@ -303,32 +304,34 @@ sequenceDiagram ### 5.1 配置模式 -自动化行为通过`cortex-mem.toml`中的`[automation]`部分控制: - -```toml -[automation] -# 启用新文件自动索引 -auto_index = true - -# 会话关闭时启用自动提取 -auto_extract = true - -# 文件系统轮询间隔(秒) -poll_interval_secs = 5 - -# 批量处理延迟以分组快速变化 -batch_delay_secs = 2 +自动化行为通过 `AutomationConfig` 结构体在代码中配置。以下是一个典型的配置示例: -# 启动时执行完整同步 -sync_on_startup = false - -# 最大并发索引操作 -max_concurrent_indexes = 10 - -# 提取置信度阈值(0.0 - 1.0) -extraction_confidence_threshold = 0.7 +```rust +let automation_config = AutomationConfig { + auto_index: true, + auto_extract: true, + index_on_message: false, // 性能考虑,默认不实时索引 + index_on_close: true, // 会话关闭时批量索引 + index_batch_delay: 2, // 批量处理延迟(秒) + auto_generate_layers_on_startup: false, // 避免启动阻塞 + generate_layers_every_n_messages: 5, // 每5条消息生成L0/L1 + max_concurrent_llm_tasks: 3, // 限制并发LLM调用 +}; ``` +**配置参数说明**: + +| 参数 | 默认值 | 说明 | +|------|--------|------| +| `auto_index` | `true` | 启用自动向量索引 | +| `auto_extract` | `true` | 启用会话关闭时记忆提取 | +| `index_on_message` | `false` | 消息添加时实时索引(性能敏感场景建议关闭) | +| `index_on_close` | `true` | 会话关闭时批量索引 | +| `index_batch_delay` | `2` | 批量处理延迟秒数,用于合并快速变化 | +| `auto_generate_layers_on_startup` | `false` | 启动时生成缺失的L0/L1文件 | +| `generate_layers_every_n_messages` | `0` | 每N条消息触发L0/L1生成(0=禁用) | +| `max_concurrent_llm_tasks` | `3` | 最大并发LLM任务数,防止压垮API | + ### 5.2 多租户考虑 在多租户部署中: From 65f11700c048b0e1138254bc051b76565787e059 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Sun, 8 Mar 2026 22:27:59 +0800 Subject: [PATCH 03/20] Translate Chinese comments and log messages to English --- 
.../src/automation/layer_generator.rs | 182 +++++++++--------- cortex-mem-core/src/automation/manager.rs | 10 +- cortex-mem-core/src/cascade_layer_updater.rs | 22 +-- cortex-mem-core/src/layers/generator.rs | 12 +- cortex-mem-core/src/llm/client.rs | 24 +-- .../src/memory_event_coordinator.rs | 80 ++++---- cortex-mem-core/src/session/extraction.rs | 6 +- cortex-mem-tools/src/operations.rs | 70 +++---- 8 files changed, 203 insertions(+), 203 deletions(-) diff --git a/cortex-mem-core/src/automation/layer_generator.rs b/cortex-mem-core/src/automation/layer_generator.rs index 3685f4e..b1419ea 100644 --- a/cortex-mem-core/src/automation/layer_generator.rs +++ b/cortex-mem-core/src/automation/layer_generator.rs @@ -100,26 +100,26 @@ impl LayerGenerator { for scope in &["session", "user", "agent", "resources"] { let scope_uri = format!("cortex://{}", scope); - // 检查维度是否存在 + // Check if scope exists match self.filesystem.exists(&scope_uri).await { Ok(true) => { - log::info!("📂 扫描维度: {} ({})", scope, scope_uri); + log::info!("Scanning scope: {} ({})", scope, scope_uri); match self.scan_scope(&scope_uri).await { Ok(dirs) => { - log::info!("📂 维度 {} 发现 {} 个目录", scope, dirs.len()); + log::info!("Scope {} found {} directories", scope, dirs.len()); directories.extend(dirs); } Err(e) => { - log::warn!("⚠️ 扫描维度 {} 失败: {}", scope, e); + log::warn!("Failed to scan scope {}: {}", scope, e); warn!("Failed to scan scope {}: {}", scope, e); } } } Ok(false) => { - log::info!("📂 维度 {} 不存在,跳过", scope); + log::info!("Scope {} does not exist, skipping", scope); } Err(e) => { - log::warn!("⚠️ 检查维度 {} 存在性失败: {}", scope, e); + log::warn!("Failed to check scope {} existence: {}", scope, e); } } } @@ -127,35 +127,35 @@ impl LayerGenerator { Ok(directories) } - /// 扫描单个维度 + /// Scan a single scope async fn scan_scope(&self, scope_uri: &str) -> Result> { let mut directories = Vec::new(); - // 先检查维度是否存在 + // First check if scope exists match self.filesystem.exists(scope_uri).await { Ok(true) => { - 
log::info!("📂 维度目录存在: {}", scope_uri); + log::info!("Scope directory exists: {}", scope_uri); } Ok(false) => { - log::info!("📂 维度目录不存在: {}", scope_uri); + log::info!("Scope directory does not exist: {}", scope_uri); return Ok(directories); } Err(e) => { - log::warn!("⚠️ 检查维度存在性失败: {} - {}", scope_uri, e); + log::warn!("Failed to check scope existence: {} - {}", scope_uri, e); return Ok(directories); } } - // 尝试列出目录内容 + // Try to list directory contents match self.filesystem.list(scope_uri).await { Ok(entries) => { - log::info!("📂 维度 {} 下有 {} 个条目", scope_uri, entries.len()); + log::info!("Scope {} has {} entries", scope_uri, entries.len()); for entry in &entries { - log::info!("📂 - {} (is_dir: {})", entry.name, entry.is_directory); + log::info!(" - {} (is_dir: {})", entry.name, entry.is_directory); } } Err(e) => { - log::warn!("⚠️ 列出维度目录失败: {} - {}", scope_uri, e); + log::warn!("Failed to list scope directory: {} - {}", scope_uri, e); return Ok(directories); } } @@ -230,30 +230,30 @@ impl LayerGenerator { Ok(missing) } - /// 确保所有目录拥有 L0/L1 + /// Ensure all directories have L0/L1 pub async fn ensure_all_layers(&self) -> Result { - log::info!("🔍 开始扫描目录..."); - info!("开始扫描目录..."); + log::info!("Starting directory scan..."); + info!("Starting directory scan..."); let directories = self.scan_all_directories().await?; - log::info!("📋 发现 {} 个目录", directories.len()); - info!("发现 {} 个目录", directories.len()); + log::info!("Found {} directories", directories.len()); + info!("Found {} directories", directories.len()); - // 🔧 Debug: 打印扫描到的目录 + // Debug: print scanned directories for dir in &directories { - log::debug!("扫描到目录: {}", dir); - debug!("扫描到目录: {}", dir); + log::debug!("Scanned directory: {}", dir); + debug!("Scanned directory: {}", dir); } - log::info!("🔎 检测缺失的 L0/L1..."); - info!("检测缺失的 L0/L1..."); + log::info!("Detecting missing L0/L1..."); + info!("Detecting missing L0/L1..."); let missing = self.filter_missing_layers(&directories).await?; - log::info!("📋 发现 {} 
个目录缺失 L0/L1", missing.len()); - info!("发现 {} 个目录缺失 L0/L1", missing.len()); + log::info!("Found {} directories missing L0/L1", missing.len()); + info!("Found {} directories missing L0/L1", missing.len()); - // 🔧 Debug: 打印缺失层级文件的目录 + // Debug: print directories missing layer files for dir in &missing { - log::info!("📝 需要生成层级文件: {}", dir); - info!("需要生成层级文件: {}", dir); + log::info!("Need to generate layer files for: {}", dir); + info!("Need to generate layer files for: {}", dir); } if missing.is_empty() { @@ -270,53 +270,53 @@ impl LayerGenerator { failed: 0, }; - // 分批生成 + // Generate in batches let total_batches = (missing.len() + self.config.batch_size - 1) / self.config.batch_size; for (batch_idx, batch) in missing.chunks(self.config.batch_size).enumerate() { - log::info!("📦 处理批次 {}/{}", batch_idx + 1, total_batches); - info!("处理批次 {}/{}", batch_idx + 1, total_batches); + log::info!("Processing batch {}/{}", batch_idx + 1, total_batches); + info!("Processing batch {}/{}", batch_idx + 1, total_batches); for dir in batch { match self.generate_layers_for_directory(dir).await { Ok(_) => { stats.generated += 1; - log::info!("✅ 生成成功: {}", dir); - info!("✓ 生成成功: {}", dir); + log::info!("Generation succeeded: {}", dir); + info!("Generation succeeded: {}", dir); } Err(e) => { stats.failed += 1; - log::warn!("⚠️ 生成失败: {} - {}", dir, e); - warn!("✗ 生成失败: {} - {}", dir, e); + log::warn!("Generation failed: {} - {}", dir, e); + warn!("Generation failed: {} - {}", dir, e); } } } - // 批次间延迟 + // Delay between batches if batch_idx < total_batches - 1 { tokio::time::sleep(tokio::time::Duration::from_millis(self.config.delay_ms)).await; } } - log::info!("✅ 生成完成: 成功 {}, 失败 {}", stats.generated, stats.failed); - info!("生成完成: 成功 {}, 失败 {}", stats.generated, stats.failed); + log::info!("Generation completed: {} succeeded, {} failed", stats.generated, stats.failed); + info!("Generation completed: {} succeeded, {} failed", stats.generated, stats.failed); Ok(stats) } - /// 
确保特定timeline目录拥有L0/L1层级文件 - /// 用于会话关闭时触发生成,避免频繁更新 + /// Ensure a specific timeline directory has L0/L1 layer files + /// Used when session closes to trigger generation, avoiding frequent updates pub async fn ensure_timeline_layers(&self, timeline_uri: &str) -> Result { - info!("开始为timeline生成层级文件: {}", timeline_uri); + info!("Starting layer generation for timeline: {}", timeline_uri); - // 扫描timeline下的所有目录 + // Scan all directories under timeline let mut directories = Vec::new(); self.scan_recursive(timeline_uri, &mut directories).await?; - info!("发现 {} 个timeline目录", directories.len()); + info!("Found {} timeline directories", directories.len()); - // 检测缺失的 L0/L1 + // Detect missing L0/L1 let missing = self.filter_missing_layers(&directories).await?; - info!("发现 {} 个目录缺失 L0/L1", missing.len()); + info!("Found {} directories missing L0/L1", missing.len()); if missing.is_empty() { return Ok(GenerationStats { @@ -332,67 +332,67 @@ impl LayerGenerator { failed: 0, }; - // 生成层级文件(不需要分批,因为timeline通常不大) + // Generate layer files (no need to batch, timeline is usually small) for dir in missing { match self.generate_layers_for_directory(&dir).await { Ok(_) => { stats.generated += 1; - info!("✓ 生成成功: {}", dir); + info!("Generation succeeded: {}", dir); } Err(e) => { stats.failed += 1; - warn!("✗ 生成失败: {} - {}", dir, e); + warn!("Generation failed: {} - {}", dir, e); } } } info!( - "Timeline层级生成完成: 成功 {}, 失败 {}", + "Timeline layer generation completed: {} succeeded, {} failed", stats.generated, stats.failed ); Ok(stats) } - /// 为单个目录生成 L0/L1 + /// Generate L0/L1 for a single directory async fn generate_layers_for_directory(&self, uri: &str) -> Result<()> { - debug!("生成层级文件: {}", uri); + debug!("Generating layer files for: {}", uri); - // 1. 检查是否需要重新生成(避免重复生成未变更的内容) + // 1. Check if regeneration is needed (avoid generating unchanged content) if !self.should_regenerate(uri).await? 
{ - debug!("目录内容未变更,跳过生成: {}", uri); + debug!("Directory content unchanged, skipping generation: {}", uri); return Ok(()); } - // 2. 读取目录内容(聚合所有子文件) + // 2. Read directory content (aggregate all sub-files) let content = self.aggregate_directory_content(uri).await?; if content.is_empty() { - debug!("目录为空,跳过: {}", uri); + debug!("Directory is empty, skipping: {}", uri); return Ok(()); } - // 3. 使用现有的 AbstractGenerator 生成 L0 抽象 + // 3. Use existing AbstractGenerator to generate L0 abstract let abstract_text = self .abstract_gen .generate_with_llm(&content, &self.llm_client) .await?; - // 4. 使用现有的 OverviewGenerator 生成 L1 概览 + // 4. Use existing OverviewGenerator to generate L1 overview let overview = self .overview_gen .generate_with_llm(&content, &self.llm_client) .await?; - // 5. 强制执行长度限制 + // 5. Enforce length limits let abstract_text = self.enforce_abstract_limit(abstract_text)?; let overview = self.enforce_overview_limit(overview)?; - // 6. 添加 "Added" 日期标记(与 extraction.rs 保持一致) + // 6. Add "Added" date marker (consistent with extraction.rs) let timestamp = Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); let abstract_with_date = format!("{}\n\n**Added**: {}", abstract_text, timestamp); let overview_with_date = format!("{}\n\n---\n\n**Added**: {}", overview, timestamp); - // 7. 写入文件 + // 7. Write files let abstract_path = format!("{}/.abstract.md", uri); let overview_path = format!("{}/.overview.md", uri); @@ -403,64 +403,64 @@ impl LayerGenerator { .write(&overview_path, &overview_with_date) .await?; - debug!("层级文件生成完成: {}", uri); + debug!("Layer files generated for: {}", uri); Ok(()) } - /// 检查是否需要重新生成层级文件 + /// Check if layer files need to be regenerated /// - /// 检查逻辑: - /// 1. 如果 .abstract.md 或 .overview.md 不存在 → 需要生成 - /// 2. 如果目录中有文件比 .abstract.md 更新 → 需要重新生成 - /// 3. 否则 → 跳过(避免重复生成) + /// Check logic: + /// 1. If .abstract.md or .overview.md doesn't exist → need to generate + /// 2. If files in directory are newer than .abstract.md → need to regenerate + /// 3. 
Otherwise → skip (avoid duplicate generation) async fn should_regenerate(&self, uri: &str) -> Result { let abstract_path = format!("{}/.abstract.md", uri); let overview_path = format!("{}/.overview.md", uri); - // 检查层级文件是否存在 + // Check if layer files exist let abstract_exists = self.filesystem.exists(&abstract_path).await?; let overview_exists = self.filesystem.exists(&overview_path).await?; if !abstract_exists || !overview_exists { - debug!("层级文件缺失,需要生成: {}", uri); + debug!("Layer files missing, need to generate: {}", uri); return Ok(true); } - // 读取 .abstract.md 中的时间戳 + // Read timestamp from .abstract.md let abstract_content = match self.filesystem.read(&abstract_path).await { Ok(content) => content, Err(_) => { - debug!("无法读取 .abstract.md,需要重新生成: {}", uri); + debug!("Cannot read .abstract.md, need to regenerate: {}", uri); return Ok(true); } }; - // 提取 "Added" 时间戳 + // Extract "Added" timestamp let abstract_timestamp = self.extract_added_timestamp(&abstract_content); if abstract_timestamp.is_none() { - debug!(".abstract.md 缺少时间戳,需要重新生成: {}", uri); + debug!(".abstract.md missing timestamp, need to regenerate: {}", uri); return Ok(true); } let abstract_time = abstract_timestamp.unwrap(); - // 检查目录中的文件是否有更新 + // Check if files in directory have updates let entries = self.filesystem.list(uri).await?; for entry in entries { - // 跳过隐藏文件和目录 + // Skip hidden files and directories if entry.name.starts_with('.') || entry.is_directory { continue; } - // 只检查 .md 和 .txt 文件 + // Only check .md and .txt files if entry.name.ends_with(".md") || entry.name.ends_with(".txt") { - // 读取文件内容,提取其中的时间戳(如果有) + // Read file content, extract timestamp if any if let Ok(file_content) = self.filesystem.read(&entry.uri).await { if let Some(file_time) = self.extract_added_timestamp(&file_content) { - // 如果文件时间戳晚于 abstract 时间戳,需要重新生成 + // If file timestamp is later than abstract timestamp, need to regenerate if file_time > abstract_time { - debug!("文件 {} 有更新,需要重新生成: {}", entry.name, uri); + 
debug!("File {} has updates, need to regenerate: {}", entry.name, uri); return Ok(true); } } @@ -468,18 +468,18 @@ impl LayerGenerator { } } - debug!("目录内容未变更,无需重新生成: {}", uri); + debug!("Directory content unchanged, no need to regenerate: {}", uri); Ok(false) } - /// 从内容中提取 "Added" 时间戳 + /// Extract "Added" timestamp from content fn extract_added_timestamp(&self, content: &str) -> Option> { - // 查找 "**Added**: YYYY-MM-DD HH:MM:SS UTC" 格式 + // Find "**Added**: YYYY-MM-DD HH:MM:SS UTC" format if let Some(start) = content.find("**Added**: ") { let timestamp_str = &content[start + 11..]; if let Some(end) = timestamp_str.find('\n') { let timestamp_str = ×tamp_str[..end].trim(); - // 解析时间戳 + // Parse timestamp if let Ok(dt) = DateTime::parse_from_str(timestamp_str, "%Y-%m-%d %H:%M:%S UTC") { return Some(dt.with_timezone(&Utc)); } @@ -594,9 +594,9 @@ impl LayerGenerator { Ok(result) } - /// 重新生成所有超大的 .abstract 文件 + /// Regenerate all oversized .abstract files pub async fn regenerate_oversized_abstracts(&self) -> Result { - info!("扫描超大的 .abstract 文件..."); + info!("Scanning for oversized .abstract files..."); let directories = self.scan_all_directories().await?; let max_chars = self.config.abstract_config.max_chars; @@ -610,13 +610,13 @@ impl LayerGenerator { let abstract_path = format!("{}/.abstract.md", dir); if let Ok(content) = self.filesystem.read(&abstract_path).await { - // 移除 "Added" 标记后再检查长度 + // Remove "Added" marker before checking length let content_without_metadata = self.strip_metadata(&content); if content_without_metadata.len() > max_chars { stats.total += 1; info!( - "发现超大 .abstract: {} ({} 字符)", + "Found oversized .abstract: {} ({} chars)", dir, content_without_metadata.len() ); @@ -624,11 +624,11 @@ impl LayerGenerator { match self.generate_layers_for_directory(&dir).await { Ok(_) => { stats.regenerated += 1; - info!("✓ 重新生成成功: {}", dir); + info!("Regeneration succeeded: {}", dir); } Err(e) => { stats.failed += 1; - warn!("✗ 重新生成失败: {} - {}", dir, e); + 
warn!("Regeneration failed: {} - {}", dir, e); } } } @@ -636,7 +636,7 @@ impl LayerGenerator { } info!( - "重新生成完成: 总计 {}, 成功 {}, 失败 {}", + "Regeneration completed: total={}, succeeded={}, failed={}", stats.total, stats.regenerated, stats.failed ); diff --git a/cortex-mem-core/src/automation/manager.rs b/cortex-mem-core/src/automation/manager.rs index 797c9f5..2eef190 100644 --- a/cortex-mem-core/src/automation/manager.rs +++ b/cortex-mem-core/src/automation/manager.rs @@ -90,26 +90,26 @@ impl AutomationManager { // 启动时自动生成缺失的 L0/L1 文件 if self.config.auto_generate_layers_on_startup { if let Some(ref generator) = self.layer_generator { - info!("启动时检查并生成缺失的 L0/L1 文件..."); + info!("Checking and generating missing L0/L1 files on startup..."); let generator_clone = generator.clone(); let semaphore = self.llm_semaphore.clone(); tokio::spawn(async move { - // 获取信号量许可 + // Acquire semaphore permit let _permit = semaphore.acquire().await; match generator_clone.ensure_all_layers().await { Ok(stats) => { info!( - "启动时层级生成完成: 总计 {}, 成功 {}, 失败 {}", + "Startup layer generation completed: total={}, generated={}, failed={}", stats.total, stats.generated, stats.failed ); } Err(e) => { - warn!("启动时层级生成失败: {}", e); + warn!("Startup layer generation failed: {}", e); } } }); } else { - warn!("auto_generate_layers_on_startup 已启用但未设置 layer_generator"); + warn!("auto_generate_layers_on_startup is enabled but layer_generator is not set"); } } diff --git a/cortex-mem-core/src/cascade_layer_updater.rs b/cortex-mem-core/src/cascade_layer_updater.rs index 872d42e..857bbc1 100644 --- a/cortex-mem-core/src/cascade_layer_updater.rs +++ b/cortex-mem-core/src/cascade_layer_updater.rs @@ -790,23 +790,23 @@ impl CascadeLayerUpdater { pub async fn update_all_layers(&self, scope: &MemoryScope, owner_id: &str) -> Result<()> { let root_uri = self.get_scope_root(scope, owner_id); - log::info!("🔄 update_all_layers: 检查根目录 {}", root_uri); + log::info!("update_all_layers: checking root directory {}", 
root_uri); if !self.filesystem.exists(&root_uri).await? { - log::info!("📂 根目录 {} 不存在,跳过", root_uri); + log::info!("Root directory {} does not exist, skipping", root_uri); return Ok(()); } - log::info!("📂 根目录存在,开始递归更新层级文件..."); + log::info!("Root directory exists, starting recursive layer file update..."); // Walk through all directories and update layers self.update_all_layers_recursive(&root_uri, scope, owner_id).await?; // Update root layers last - log::info!("🔄 开始更新根目录层级文件..."); + log::info!("Starting root directory layer file update..."); self.update_root_layers(scope, owner_id).await?; - log::info!("✅ update_all_layers 完成: {:?}", scope); + log::info!("update_all_layers completed: {:?}", scope); Ok(()) } @@ -820,12 +820,12 @@ impl CascadeLayerUpdater { Box::pin(async move { let entries = self.filesystem.list(dir_uri).await?; - log::info!("📂 update_all_layers_recursive: {} 有 {} 个条目", dir_uri, entries.len()); + log::info!("update_all_layers_recursive: {} has {} entries", dir_uri, entries.len()); // First, process all subdirectories for entry in &entries { if entry.is_directory && !entry.name.starts_with('.') { - log::info!("📂 进入子目录: {}", entry.name); + log::info!(" Entering subdirectory: {}", entry.name); self.update_all_layers_recursive(&entry.uri, scope, owner_id).await?; } } @@ -835,13 +835,13 @@ impl CascadeLayerUpdater { !e.is_directory && !e.name.starts_with('.') && e.name.ends_with(".md") }); - log::info!("📂 目录 {} 是否有内容文件: {}", dir_uri, has_content); + log::info!("Directory {} has content files: {}", dir_uri, has_content); if has_content { - log::info!("🔄 开始为目录 {} 生成层级文件...", dir_uri); + log::info!("Starting layer file generation for directory {}...", dir_uri); match self.update_directory_layers(dir_uri, scope, owner_id).await { - Ok(_) => log::info!("✅ 目录 {} 层级文件生成成功", dir_uri), - Err(e) => log::warn!("⚠️ 目录 {} 层级文件生成失败: {}", dir_uri, e), + Ok(_) => log::info!("Directory {} layer files generated successfully", dir_uri), + Err(e) => log::warn!("Directory 
{} layer file generation failed: {}", dir_uri, e), } } diff --git a/cortex-mem-core/src/layers/generator.rs b/cortex-mem-core/src/layers/generator.rs index 5267319..fd379cf 100644 --- a/cortex-mem-core/src/layers/generator.rs +++ b/cortex-mem-core/src/layers/generator.rs @@ -16,7 +16,7 @@ impl AbstractGenerator { /// Generate abstract from content using LLM (mandatory) pub async fn generate_with_llm(&self, content: &str, llm: &Arc) -> Result { - info!("📝 生成 L0 Abstract (内容长度: {} 字符)", content.len()); + info!("Generating L0 Abstract (content length: {} chars)", content.len()); let system = r#"You are an expert at creating concise abstracts. Your goal is to generate summaries that capture multiple key aspects of content for quick relevance checking. @@ -24,11 +24,11 @@ Keep abstracts under 100 tokens. Prioritize breadth over depth - cover more topi Be direct and informative. Use compact phrasing to maximize information density."#; let prompt = crate::llm::prompts::Prompts::abstract_generation(content); - debug!("L0 Abstract prompt 长度: {} 字符", prompt.len()); + debug!("L0 Abstract prompt length: {} chars", prompt.len()); let result = llm.complete_with_system(system, &prompt).await?; - info!("✅ L0 Abstract 生成完成 ({} 字符)", result.len()); + info!("L0 Abstract generated ({} chars)", result.len()); Ok(result) } @@ -58,18 +58,18 @@ impl OverviewGenerator { /// Generate overview from content using LLM (mandatory) pub async fn generate_with_llm(&self, content: &str, llm: &Arc) -> Result { - info!("📄 生成 L1 Overview (内容长度: {} 字符)", content.len()); + info!("Generating L1 Overview (content length: {} chars)", content.len()); let system = r#"You are an expert at creating structured overviews. Your goal is to provide comprehensive yet concise summaries (500-2000 tokens) that help users understand and make decisions about content. 
Use clear markdown structure with sections for Summary, Core Topics, Key Points, Entities, and Context."#; let prompt = crate::llm::prompts::Prompts::overview_generation(content); - debug!("L1 Overview prompt 长度: {} 字符", prompt.len()); + debug!("L1 Overview prompt length: {} chars", prompt.len()); let result = llm.complete_with_system(system, &prompt).await?; - info!("✅ L1 Overview 生成完成 ({} 字符)", result.len()); + info!("L1 Overview generated ({} chars)", result.len()); Ok(result) } } \ No newline at end of file diff --git a/cortex-mem-core/src/llm/client.rs b/cortex-mem-core/src/llm/client.rs index 4c534b1..ec64d2b 100644 --- a/cortex-mem-core/src/llm/client.rs +++ b/cortex-mem-core/src/llm/client.rs @@ -141,8 +141,8 @@ impl LLMClientImpl { pub async fn complete(&self, prompt: &str) -> Result { use rig::completion::Prompt; - tracing::info!("🔄 LLM 调用开始 [模型: {}]", self.config.model_efficient); - tracing::debug!("📝 Prompt 长度: {} 字符", prompt.len()); + tracing::info!("LLM call started [model: {}]", self.config.model_efficient); + tracing::debug!("Prompt length: {} chars", prompt.len()); let start = std::time::Instant::now(); @@ -153,7 +153,7 @@ impl LLMClientImpl { .map_err(|e| crate::Error::Llm(format!("LLM completion failed: {}", e)))?; let elapsed = start.elapsed(); - tracing::info!("✅ LLM 调用完成 [耗时: {:.2}s, 响应: {} 字符]", elapsed.as_secs_f64(), response.len()); + tracing::info!("LLM call completed [elapsed: {:.2}s, response: {} chars]", elapsed.as_secs_f64(), response.len()); Ok(response) } @@ -162,8 +162,8 @@ impl LLMClientImpl { pub async fn complete_with_system(&self, system: &str, prompt: &str) -> Result { use rig::completion::Prompt; - tracing::info!("🔄 LLM 调用开始 (with system) [模型: {}]", self.config.model_efficient); - tracing::debug!("📝 System: {}..., Prompt 长度: {} 字符", + tracing::info!("LLM call started (with system) [model: {}]", self.config.model_efficient); + tracing::debug!("System: {}..., Prompt length: {} chars", &system.chars().take(50).collect::(), 
prompt.len()); let start = std::time::Instant::now(); @@ -175,7 +175,7 @@ impl LLMClientImpl { .map_err(|e| crate::Error::Llm(format!("LLM completion failed: {}", e)))?; let elapsed = start.elapsed(); - tracing::info!("✅ LLM 调用完成 [耗时: {:.2}s, 响应: {} 字符]", elapsed.as_secs_f64(), response.len()); + tracing::info!("LLM call completed [elapsed: {:.2}s, response: {} chars]", elapsed.as_secs_f64(), response.len()); Ok(response) } @@ -287,8 +287,8 @@ impl LLMClient for LLMClientImpl { async fn complete(&self, prompt: &str) -> Result { use rig::completion::Prompt; - tracing::info!("🔄 LLM 调用开始 [模型: {}]", self.config.model_efficient); - tracing::debug!("📝 Prompt 长度: {} 字符", prompt.len()); + tracing::info!("LLM call started [model: {}]", self.config.model_efficient); + tracing::debug!("Prompt length: {} chars", prompt.len()); let start = std::time::Instant::now(); @@ -299,7 +299,7 @@ impl LLMClient for LLMClientImpl { .map_err(|e| crate::Error::Llm(format!("LLM completion failed: {}", e)))?; let elapsed = start.elapsed(); - tracing::info!("✅ LLM 调用完成 [耗时: {:.2}s, 响应: {} 字符]", elapsed.as_secs_f64(), response.len()); + tracing::info!("LLM call completed [elapsed: {:.2}s, response: {} chars]", elapsed.as_secs_f64(), response.len()); Ok(response) } @@ -307,8 +307,8 @@ impl LLMClient for LLMClientImpl { async fn complete_with_system(&self, system: &str, prompt: &str) -> Result { use rig::completion::Prompt; - tracing::info!("🔄 LLM 调用开始 (with system) [模型: {}]", self.config.model_efficient); - tracing::debug!("📝 System: {}..., Prompt 长度: {} 字符", + tracing::info!("LLM call started (with system) [model: {}]", self.config.model_efficient); + tracing::debug!("System: {}..., Prompt length: {} chars", &system.chars().take(50).collect::(), prompt.len()); let start = std::time::Instant::now(); @@ -320,7 +320,7 @@ impl LLMClient for LLMClientImpl { .map_err(|e| crate::Error::Llm(format!("LLM completion failed: {}", e)))?; let elapsed = start.elapsed(); - tracing::info!("✅ LLM 调用完成 [耗时: 
{:.2}s, 响应: {} 字符]", elapsed.as_secs_f64(), response.len()); + tracing::info!("LLM call completed [elapsed: {:.2}s, response: {} chars]", elapsed.as_secs_f64(), response.len()); Ok(response) } diff --git a/cortex-mem-core/src/memory_event_coordinator.rs b/cortex-mem-core/src/memory_event_coordinator.rs index 76d03dc..650919a 100644 --- a/cortex-mem-core/src/memory_event_coordinator.rs +++ b/cortex-mem-core/src/memory_event_coordinator.rs @@ -300,34 +300,34 @@ impl MemoryEventCoordinator { /// * `true` - 所有任务已完成 /// * `false` - 在等待过程中有新任务产生(通常不应该发生) pub async fn flush_and_wait(&self, check_interval: Duration) -> bool { - log::info!("🔄 开始刷新并等待所有任务完成..."); + log::info!("Starting flush and waiting for all tasks to complete..."); let start = std::time::Instant::now(); - let max_wait = Duration::from_secs(300); // 最大等待 5 分钟 + let max_wait = Duration::from_secs(300); // Max wait 5 minutes - // 阶段0:让出运行时,让事件循环有机会运行 - // 这是关键:tokio::task::yield_now() 让其他任务有机会执行 - log::info!("⏳ 阶段0:让出运行时,等待事件被取出..."); + // Phase 0: Yield runtime to let event loop have a chance to run + // This is critical: tokio::task::yield_now() lets other tasks execute + log::info!("Phase 0: Yielding runtime, waiting for events to be processed..."); for i in 0..10 { tokio::task::yield_now().await; tokio::time::sleep(Duration::from_millis(10)).await; let pending = self.pending_tasks.load(Ordering::SeqCst); if pending > 0 { - log::info!("✅ 阶段0完成:检测到 {} 个任务开始处理", pending); + log::info!("Phase 0 completed: detected {} tasks started processing", pending); break; } if i == 9 { - log::info!("ℹ️ 阶段0完成:无待处理任务检测到"); + log::info!("Phase 0 completed: no pending tasks detected"); } } - // 阶段1:等待当前事件处理完成 + // Phase 1: Wait for current event processing to complete loop { let pending = self.pending_tasks.load(Ordering::SeqCst); if pending == 0 { - // 等待一小段时间,看是否有新事件被取出 + // Wait a short time to see if new events are being processed tokio::time::sleep(Duration::from_millis(100)).await; let pending_after = 
self.pending_tasks.load(Ordering::SeqCst); if pending_after == 0 { @@ -336,75 +336,75 @@ impl MemoryEventCoordinator { continue; } - // 检查是否超时 + // Check if timed out if start.elapsed() >= max_wait { - log::warn!("⚠️ 等待超时,仍有 {} 个任务未完成", pending); + log::warn!("Wait timeout, still have {} tasks not completed", pending); return false; } log::trace!( - "⏳ 等待 {} 个事件处理任务完成...(已等待 {:?})", + "Waiting for {} event processing tasks... (elapsed: {:?})", pending, start.elapsed() ); tokio::time::sleep(check_interval).await; } - log::info!("✅ 阶段1完成:事件处理任务已清空"); + log::info!("Phase 1 completed: event processing tasks cleared"); - // 阶段2:刷新 debouncer 中的待处理更新 + // Phase 2: Flush pending updates in debouncer if let Some(ref debouncer) = self.debouncer { let pending_count = debouncer.pending_count().await; if pending_count > 0 { log::info!( - "🔄 阶段2:刷新 {} 个 debouncer 待处理更新...", + "Phase 2: Flushing {} debouncer pending updates...", pending_count ); let flushed = debouncer.flush_all(&self.layer_updater).await; - log::info!("✅ 阶段2完成:已刷新 {} 个层级更新", flushed); + log::info!("Phase 2 completed: flushed {} layer updates", flushed); } else { - log::info!("✅ 阶段2完成:debouncer 无待处理更新"); + log::info!("Phase 2 completed: debouncer has no pending updates"); } } else { - log::info!("✅ 阶段2跳过:debouncer 未启用"); + log::info!("Phase 2 skipped: debouncer not enabled"); } - // 阶段3:再次等待,确保 debouncer 刷新产生的任务也完成 + // Phase 3: Wait again to ensure tasks from debouncer flush also complete loop { let pending = self.pending_tasks.load(Ordering::SeqCst); if pending == 0 { break; } - // 检查是否超时 + // Check if timed out if start.elapsed() >= max_wait { - log::warn!("⚠️ 等待超时,仍有 {} 个任务未完成", pending); + log::warn!("Wait timeout, still have {} tasks not completed", pending); return false; } log::info!( - "⏳ 等待 {} 个刷新后任务完成...(已等待 {:?})", + "Waiting for {} post-flush tasks... 
(elapsed: {:?})", pending, start.elapsed() ); tokio::time::sleep(check_interval).await; } - log::info!("✅ 阶段3完成:所有任务已清空"); + log::info!("Phase 3 completed: all tasks cleared"); log::info!( - "🎉 flush_and_wait 完成:所有任务和层级更新已处理(耗时 {:?})", + "flush_and_wait completed: all tasks and layer updates processed (elapsed: {:?})", start.elapsed() ); true } - /// 等待所有后台任务完成 + /// Wait for all background tasks to complete /// /// # Arguments - /// * `timeout` - 最大等待时间 + /// * `timeout` - Maximum wait time /// /// # Returns - /// * `true` - 所有任务已完成 - /// * `false` - 超时 + /// * `true` - All tasks completed + /// * `false` - Timeout pub async fn wait_for_completion(&self, timeout: Duration) -> bool { let start = std::time::Instant::now(); let check_interval = Duration::from_millis(500); @@ -412,31 +412,31 @@ impl MemoryEventCoordinator { loop { let pending = self.pending_tasks.load(Ordering::SeqCst); - // 如果没有待处理任务,返回成功 + // If no pending tasks, return success if pending == 0 { - // 额外等待一小段时间,确保没有新任务刚刚提交 + // Wait a short time to ensure no new tasks just submitted tokio::time::sleep(Duration::from_millis(200)).await; let pending_after = self.pending_tasks.load(Ordering::SeqCst); if pending_after == 0 { - log::info!("✅ 所有后台任务已完成"); + log::info!("All background tasks completed"); return true; } - // 有新任务提交,继续等待 + // New tasks submitted, continue waiting continue; } - // 检查是否超时 + // Check if timed out if start.elapsed() >= timeout { - log::warn!("⚠️ 等待后台任务超时,仍有 {} 个任务未完成", pending); + log::warn!("Wait for background tasks timeout, still have {} tasks not completed", pending); return false; } - // 首次打印等待日志 + // Print wait log on first time if start.elapsed() < Duration::from_millis(600) { - log::info!("⏳ 等待 {} 个后台任务完成...", pending); + log::info!("Waiting for {} background tasks to complete...", pending); } - // 等待一小段时间再检查 + // Wait a short time before checking again tokio::time::sleep(check_interval).await; } } @@ -766,9 +766,9 @@ impl MemoryEventCoordinator { session_id, 
user_result.created, user_result.updated ); - // 注意:不在这里调用 update_all_layers,因为它是长时间运行的操作 - // 会阻塞事件处理循环。改为在退出流程中显式调用 generate_user_agent_layers - log::info!("📝 记忆已写入,退出时应调用 generate_user_agent_layers 生成层级文件"); + // Note: Not calling update_all_layers here as it's a long-running operation + // that would block the event processing loop. Instead, call generate_user_agent_layers explicitly during exit flow + log::info!("Memory written, should call generate_user_agent_layers during exit to generate layer files"); } else { log::info!("⚠️ No memories extracted from session {}", session_id); } diff --git a/cortex-mem-core/src/session/extraction.rs b/cortex-mem-core/src/session/extraction.rs index 0f9b540..8593577 100644 --- a/cortex-mem-core/src/session/extraction.rs +++ b/cortex-mem-core/src/session/extraction.rs @@ -172,17 +172,17 @@ impl MemoryExtractor { return Ok(ExtractedMemories::default()); } - tracing::info!("🧠 开始从 {} 条消息中提取记忆", messages.len()); + tracing::info!("Starting memory extraction from {} messages", messages.len()); let prompt = self.build_extraction_prompt(messages); - tracing::debug!("📝 记忆提取 prompt 长度: {} 字符", prompt.len()); + tracing::debug!("Memory extraction prompt length: {} chars", prompt.len()); let response = self.llm_client.complete(&prompt).await?; let memories = self.parse_extraction_response(&response)?; tracing::info!( - "✅ 记忆提取完成: 偏好={}, 实体={}, 事件={}, 案例={}, 个人信息={}, 工作经历={}, 关系={}, 目标={}", + "Memory extraction completed: preferences={}, entities={}, events={}, cases={}, personal_info={}, work_history={}, relationships={}, goals={}", memories.preferences.len(), memories.entities.len(), memories.events.len(), diff --git a/cortex-mem-tools/src/operations.rs b/cortex-mem-tools/src/operations.rs index ec18fd8..712c5ea 100644 --- a/cortex-mem-tools/src/operations.rs +++ b/cortex-mem-tools/src/operations.rs @@ -568,17 +568,17 @@ impl MemoryOperations { Ok(exists) } - /// 生成所有缺失的 L0/L1 层级文件(用于退出时调用) + /// Generate all missing L0/L1 layer files 
(for calling during exit) /// - /// 这个方法扫描所有目录,找出缺失 .abstract.md 或 .overview.md 的目录, - /// 并批量生成它们。适合在应用退出时调用。 + /// This method scans all directories, finds those missing .abstract.md or .overview.md, + /// and generates them in batches. Suitable for calling during application exit. pub async fn ensure_all_layers(&self) -> Result { if let Some(ref generator) = self.layer_generator { - tracing::info!("🔍 开始扫描并生成缺失的 L0/L1 层级文件..."); + tracing::info!("Starting scan and generation of missing L0/L1 layer files..."); match generator.ensure_all_layers().await { Ok(stats) => { tracing::info!( - "✅ L0/L1 层级生成完成: 总计 {}, 成功 {}, 失败 {}", + "L0/L1 layer generation completed: total={}, generated={}, failed={}", stats.total, stats.generated, stats.failed @@ -586,34 +586,34 @@ impl MemoryOperations { Ok(stats) } Err(e) => { - tracing::error!("❌ L0/L1 层级生成失败: {}", e); + tracing::error!("L0/L1 layer generation failed: {}", e); Err(e.into()) } } } else { - tracing::warn!("⚠️ LayerGenerator 未配置,跳过层级生成"); + tracing::warn!("LayerGenerator not configured, skipping layer generation"); Ok(cortex_mem_core::automation::GenerationStats::default()) } } - /// 为特定session生成 L0/L1 层级文件 + /// Generate L0/L1 layer files for a specific session /// # Arguments - /// * `session_id` - 会话ID + /// * `session_id` - Session ID /// /// # Returns - /// 返回生成统计信息 + /// Returns generation statistics pub async fn ensure_session_layers( &self, session_id: &str, ) -> Result { if let Some(ref generator) = self.layer_generator { let timeline_uri = format!("cortex://session/{}/timeline", session_id); - tracing::info!("🔍 为会话 {} 生成 L0/L1 层级文件", session_id); + tracing::info!("Generating L0/L1 layer files for session {}", session_id); match generator.ensure_timeline_layers(&timeline_uri).await { Ok(stats) => { tracing::info!( - "✅ 会话 {} L0/L1 层级生成完成: 总计 {}, 成功 {}, 失败 {}", + "Session {} L0/L1 layer generation completed: total={}, generated={}, failed={}", session_id, stats.total, stats.generated, @@ -622,37 +622,37 @@ impl 
MemoryOperations { Ok(stats) } Err(e) => { - tracing::error!("❌ 会话 {} L0/L1 层级生成失败: {}", session_id, e); + tracing::error!("Session {} L0/L1 layer generation failed: {}", session_id, e); Err(e.into()) } } } else { - tracing::warn!("⚠️ LayerGenerator 未配置,跳过层级生成"); + tracing::warn!("LayerGenerator not configured, skipping layer generation"); Ok(cortex_mem_core::automation::GenerationStats::default()) } } - /// 索引所有文件到向量数据库(用于退出时调用) - /// 这个方法扫描所有文件,包括新生成的 .abstract.md 和 .overview.md, - /// 并将它们索引到向量数据库中。适合在应用退出时调用。 + /// Index all files to vector database (for calling during exit) + /// This method scans all files, including newly generated .abstract.md and .overview.md, + /// and indexes them to the vector database. Suitable for calling during application exit. pub async fn index_all_files(&self) -> Result { - tracing::info!("📊 开始索引所有文件到向量数据库..."); + tracing::info!("Starting to index all files to vector database..."); use cortex_mem_core::automation::{SyncConfig, SyncManager}; - // 创建 SyncManager + // Create SyncManager let sync_manager = SyncManager::new( self.filesystem.clone(), self.embedding_client.clone(), self.vector_store.clone(), - self.llm_client.clone(), // 不需要 Option + self.llm_client.clone(), // Not optional SyncConfig::default(), ); match sync_manager.sync_all().await { Ok(stats) => { tracing::info!( - "✅ 索引完成: 总计 {} 个文件, {} 个已索引, {} 个跳过, {} 个错误", + "Indexing completed: {} total files, {} indexed, {} skipped, {} errors", stats.total_files, stats.indexed_files, stats.skipped_files, @@ -661,28 +661,28 @@ impl MemoryOperations { Ok(stats) } Err(e) => { - tracing::error!("❌ 索引失败: {}", e); + tracing::error!("Indexing failed: {}", e); Err(e.into()) } } } - /// 为特定session索引文件到向量数据库 + /// Index files to vector database for a specific session /// /// # Arguments - /// * `session_id` - 会话ID + /// * `session_id` - Session ID /// /// # Returns - /// 返回索引统计信息 + /// Returns indexing statistics pub async fn index_session_files( &self, session_id: &str, ) -> Result { - 
tracing::info!("📊 开始为会话 {} 索引文件到向量数据库...", session_id); + tracing::info!("Starting to index files to vector database for session {}...", session_id); use cortex_mem_core::automation::{SyncConfig, SyncManager}; - // 创建 SyncManager + // Create SyncManager let sync_manager = SyncManager::new( self.filesystem.clone(), self.embedding_client.clone(), @@ -691,13 +691,13 @@ impl MemoryOperations { SyncConfig::default(), ); - // 限定扫描范围到特定session + // Limit scan scope to specific session let session_uri = format!("cortex://session/{}", session_id); match sync_manager.sync_specific_path(&session_uri).await { Ok(stats) => { tracing::info!( - "✅ 会话 {} 索引完成: 总计 {} 个文件, {} 个已索引, {} 个跳过, {} 个错误", + "Session {} indexing completed: {} total files, {} indexed, {} skipped, {} errors", session_id, stats.total_files, stats.indexed_files, @@ -707,7 +707,7 @@ impl MemoryOperations { Ok(stats) } Err(e) => { - tracing::error!("❌ 会话 {} 索引失败: {}", session_id, e); + tracing::error!("Session {} indexing failed: {}", session_id, e); Err(e.into()) } } @@ -737,8 +737,8 @@ impl MemoryOperations { .wait_for_completion(Duration::from_secs(max_wait_secs)) .await } else { - // 降级:如果没有 coordinator,使用简单的等待 - warn!("⚠️ MemoryEventCoordinator 未初始化,使用简单等待"); + // Fallback: if no coordinator, use simple wait + warn!("MemoryEventCoordinator not initialized, using simple wait"); tokio::time::sleep(Duration::from_secs(max_wait_secs.min(5))).await; true } @@ -762,7 +762,7 @@ impl MemoryOperations { if let Some(ref coordinator) = self.event_coordinator { coordinator.flush_and_wait(interval).await } else { - warn!("⚠️ MemoryEventCoordinator 未初始化,跳过等待"); + warn!("MemoryEventCoordinator not initialized, skipping wait"); true } } @@ -801,13 +801,13 @@ impl MemoryOperations { tracing::info!("Manual trigger processing for {:?}/{}", memory_scope, owner_id); - // 强制更新层级 + // Force update layers if let Some(ref coordinator) = self.event_coordinator { coordinator .force_full_update(&memory_scope, owner_id) .await?; } else 
{ - warn!("MemoryEventCoordinator 未初始化,无法触发处理"); + warn!("MemoryEventCoordinator not initialized, cannot trigger processing"); return Ok(ProcessingResult::default()); } From 4ae534f02ff54e43269534b871457c7ba7600449 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Sun, 8 Mar 2026 23:01:41 +0800 Subject: [PATCH 04/20] Remove log dependency and refactor logging calls to use tracing The `log` crate has been removed from core modules and all logging calls have been updated to use the `tracing` crate instead, with appropriate log level adjustments. --- Cargo.lock | 2 - cortex-mem-core/Cargo.toml | 1 - .../src/automation/layer_generator.rs | 51 ++----- cortex-mem-core/src/cascade_layer_updater.rs | 21 ++- .../src/memory_event_coordinator.rs | 134 ++++-------------- cortex-mem-core/src/session/manager.rs | 7 +- cortex-mem-service/src/main.rs | 2 +- cortex-mem-tools/Cargo.toml | 1 - 8 files changed, 56 insertions(+), 163 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 43c9064..1f11962 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -611,7 +611,6 @@ dependencies = [ "cortex-mem-config", "dyn-clone", "futures", - "log", "qdrant-client", "regex", "reqwest 0.12.24", @@ -741,7 +740,6 @@ dependencies = [ "chrono", "cortex-mem-core", "futures", - "log", "serde", "serde_json", "tempfile", diff --git a/cortex-mem-core/Cargo.toml b/cortex-mem-core/Cargo.toml index f79add2..1a307e2 100644 --- a/cortex-mem-core/Cargo.toml +++ b/cortex-mem-core/Cargo.toml @@ -24,7 +24,6 @@ tracing-subscriber = { workspace = true } walkdir = { workspace = true } rig-core = { workspace = true } reqwest = { workspace = true } -log = "0.4" # Additional dependencies regex = "1.10" diff --git a/cortex-mem-core/src/automation/layer_generator.rs b/cortex-mem-core/src/automation/layer_generator.rs index b1419ea..5ff6a3e 100644 --- a/cortex-mem-core/src/automation/layer_generator.rs +++ b/cortex-mem-core/src/automation/layer_generator.rs @@ -103,23 +103,22 @@ impl LayerGenerator { // Check if scope exists match 
self.filesystem.exists(&scope_uri).await { Ok(true) => { - log::info!("Scanning scope: {} ({})", scope, scope_uri); + debug!("Scanning scope: {}", scope); match self.scan_scope(&scope_uri).await { Ok(dirs) => { - log::info!("Scope {} found {} directories", scope, dirs.len()); + debug!("Scope {} found {} directories", scope, dirs.len()); directories.extend(dirs); } Err(e) => { - log::warn!("Failed to scan scope {}: {}", scope, e); warn!("Failed to scan scope {}: {}", scope, e); } } } Ok(false) => { - log::info!("Scope {} does not exist, skipping", scope); + debug!("Scope {} does not exist, skipping", scope); } Err(e) => { - log::warn!("Failed to check scope {} existence: {}", scope, e); + warn!("Failed to check scope {} existence: {}", scope, e); } } } @@ -134,14 +133,14 @@ impl LayerGenerator { // First check if scope exists match self.filesystem.exists(scope_uri).await { Ok(true) => { - log::info!("Scope directory exists: {}", scope_uri); + debug!("Scope directory exists: {}", scope_uri); } Ok(false) => { - log::info!("Scope directory does not exist: {}", scope_uri); + debug!("Scope directory does not exist: {}", scope_uri); return Ok(directories); } Err(e) => { - log::warn!("Failed to check scope existence: {} - {}", scope_uri, e); + warn!("Failed to check scope existence: {} - {}", scope_uri, e); return Ok(directories); } } @@ -149,13 +148,10 @@ impl LayerGenerator { // Try to list directory contents match self.filesystem.list(scope_uri).await { Ok(entries) => { - log::info!("Scope {} has {} entries", scope_uri, entries.len()); - for entry in &entries { - log::info!(" - {} (is_dir: {})", entry.name, entry.is_directory); - } + debug!("Scope {} has {} entries", scope_uri, entries.len()); } Err(e) => { - log::warn!("Failed to list scope directory: {} - {}", scope_uri, e); + warn!("Failed to list scope directory: {} - {}", scope_uri, e); return Ok(directories); } } @@ -232,29 +228,16 @@ impl LayerGenerator { /// Ensure all directories have L0/L1 pub async fn 
ensure_all_layers(&self) -> Result { - log::info!("Starting directory scan..."); - info!("Starting directory scan..."); + info!("Scanning directories for missing L0/L1 layers..."); let directories = self.scan_all_directories().await?; - log::info!("Found {} directories", directories.len()); - info!("Found {} directories", directories.len()); + debug!("Found {} directories", directories.len()); - // Debug: print scanned directories for dir in &directories { - log::debug!("Scanned directory: {}", dir); debug!("Scanned directory: {}", dir); } - log::info!("Detecting missing L0/L1..."); - info!("Detecting missing L0/L1..."); let missing = self.filter_missing_layers(&directories).await?; - log::info!("Found {} directories missing L0/L1", missing.len()); info!("Found {} directories missing L0/L1", missing.len()); - - // Debug: print directories missing layer files - for dir in &missing { - log::info!("Need to generate layer files for: {}", dir); - info!("Need to generate layer files for: {}", dir); - } if missing.is_empty() { return Ok(GenerationStats { @@ -274,20 +257,17 @@ impl LayerGenerator { let total_batches = (missing.len() + self.config.batch_size - 1) / self.config.batch_size; for (batch_idx, batch) in missing.chunks(self.config.batch_size).enumerate() { - log::info!("Processing batch {}/{}", batch_idx + 1, total_batches); - info!("Processing batch {}/{}", batch_idx + 1, total_batches); + debug!("Processing batch {}/{}", batch_idx + 1, total_batches); for dir in batch { match self.generate_layers_for_directory(dir).await { Ok(_) => { stats.generated += 1; - log::info!("Generation succeeded: {}", dir); - info!("Generation succeeded: {}", dir); + debug!("Generated: {}", dir); } Err(e) => { stats.failed += 1; - log::warn!("Generation failed: {} - {}", dir, e); - warn!("Generation failed: {} - {}", dir, e); + warn!("Failed to generate for {}: {}", dir, e); } } } @@ -298,8 +278,7 @@ impl LayerGenerator { } } - log::info!("Generation completed: {} succeeded, {} 
failed", stats.generated, stats.failed); - info!("Generation completed: {} succeeded, {} failed", stats.generated, stats.failed); + info!("Layer generation completed: {} generated, {} failed", stats.generated, stats.failed); Ok(stats) } diff --git a/cortex-mem-core/src/cascade_layer_updater.rs b/cortex-mem-core/src/cascade_layer_updater.rs index 857bbc1..a0e5c8d 100644 --- a/cortex-mem-core/src/cascade_layer_updater.rs +++ b/cortex-mem-core/src/cascade_layer_updater.rs @@ -19,7 +19,7 @@ use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::sync::Arc; use tokio::sync::{mpsc, RwLock}; -use tracing::{debug, info}; +use tracing::{debug, info, warn}; /// Update statistics for monitoring optimization effectiveness #[derive(Debug, Clone, Default)] @@ -790,23 +790,22 @@ impl CascadeLayerUpdater { pub async fn update_all_layers(&self, scope: &MemoryScope, owner_id: &str) -> Result<()> { let root_uri = self.get_scope_root(scope, owner_id); - log::info!("update_all_layers: checking root directory {}", root_uri); + debug!("Checking root directory: {}", root_uri); if !self.filesystem.exists(&root_uri).await? 
{ - log::info!("Root directory {} does not exist, skipping", root_uri); + debug!("Root directory {} does not exist, skipping", root_uri); return Ok(()); } - log::info!("Root directory exists, starting recursive layer file update..."); + debug!("Starting recursive layer update..."); // Walk through all directories and update layers self.update_all_layers_recursive(&root_uri, scope, owner_id).await?; // Update root layers last - log::info!("Starting root directory layer file update..."); self.update_root_layers(scope, owner_id).await?; - log::info!("update_all_layers completed: {:?}", scope); + info!("Layer update completed for {:?}", scope); Ok(()) } @@ -820,12 +819,11 @@ impl CascadeLayerUpdater { Box::pin(async move { let entries = self.filesystem.list(dir_uri).await?; - log::info!("update_all_layers_recursive: {} has {} entries", dir_uri, entries.len()); + debug!("Directory {} has {} entries", dir_uri, entries.len()); // First, process all subdirectories for entry in &entries { if entry.is_directory && !entry.name.starts_with('.') { - log::info!(" Entering subdirectory: {}", entry.name); self.update_all_layers_recursive(&entry.uri, scope, owner_id).await?; } } @@ -835,13 +833,10 @@ impl CascadeLayerUpdater { !e.is_directory && !e.name.starts_with('.') && e.name.ends_with(".md") }); - log::info!("Directory {} has content files: {}", dir_uri, has_content); - if has_content { - log::info!("Starting layer file generation for directory {}...", dir_uri); match self.update_directory_layers(dir_uri, scope, owner_id).await { - Ok(_) => log::info!("Directory {} layer files generated successfully", dir_uri), - Err(e) => log::warn!("Directory {} layer file generation failed: {}", dir_uri, e), + Ok(_) => debug!("Layer files generated for {}", dir_uri), + Err(e) => warn!("Layer generation failed for {}: {}", dir_uri, e), } } diff --git a/cortex-mem-core/src/memory_event_coordinator.rs b/cortex-mem-core/src/memory_event_coordinator.rs index 650919a..c5a3f17 100644 --- 
a/cortex-mem-core/src/memory_event_coordinator.rs +++ b/cortex-mem-core/src/memory_event_coordinator.rs @@ -26,7 +26,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use std::time::Duration; use tokio::sync::{RwLock, mpsc, watch}; -use tracing::{debug, error, info, warn}; +use tracing::{debug, error, info, trace, warn}; /// Configuration for event coordinator #[derive(Debug, Clone)] @@ -300,26 +300,24 @@ impl MemoryEventCoordinator { /// * `true` - 所有任务已完成 /// * `false` - 在等待过程中有新任务产生(通常不应该发生) pub async fn flush_and_wait(&self, check_interval: Duration) -> bool { - log::info!("Starting flush and waiting for all tasks to complete..."); + info!("Flushing and waiting for all tasks..."); let start = std::time::Instant::now(); - let max_wait = Duration::from_secs(300); // Max wait 5 minutes + let max_wait = Duration::from_secs(300); - // Phase 0: Yield runtime to let event loop have a chance to run - // This is critical: tokio::task::yield_now() lets other tasks execute - log::info!("Phase 0: Yielding runtime, waiting for events to be processed..."); + // Phase 0: Yield runtime to let event loop run for i in 0..10 { tokio::task::yield_now().await; tokio::time::sleep(Duration::from_millis(10)).await; let pending = self.pending_tasks.load(Ordering::SeqCst); if pending > 0 { - log::info!("Phase 0 completed: detected {} tasks started processing", pending); + debug!("Detected {} pending tasks", pending); break; } if i == 9 { - log::info!("Phase 0 completed: no pending tasks detected"); + debug!("No pending tasks detected"); } } @@ -327,7 +325,6 @@ impl MemoryEventCoordinator { loop { let pending = self.pending_tasks.load(Ordering::SeqCst); if pending == 0 { - // Wait a short time to see if new events are being processed tokio::time::sleep(Duration::from_millis(100)).await; let pending_after = self.pending_tasks.load(Ordering::SeqCst); if pending_after == 0 { @@ -336,64 +333,42 @@ impl MemoryEventCoordinator { continue; } - // Check if timed out if 
start.elapsed() >= max_wait { - log::warn!("Wait timeout, still have {} tasks not completed", pending); + warn!("Timeout, {} tasks still pending", pending); return false; } - log::trace!( - "Waiting for {} event processing tasks... (elapsed: {:?})", - pending, - start.elapsed() - ); + trace!("Waiting for {} tasks... (elapsed: {:?})", pending, start.elapsed()); tokio::time::sleep(check_interval).await; } - log::info!("Phase 1 completed: event processing tasks cleared"); + debug!("Event processing tasks cleared"); // Phase 2: Flush pending updates in debouncer if let Some(ref debouncer) = self.debouncer { let pending_count = debouncer.pending_count().await; if pending_count > 0 { - log::info!( - "Phase 2: Flushing {} debouncer pending updates...", - pending_count - ); + info!("Flushing {} debouncer updates", pending_count); let flushed = debouncer.flush_all(&self.layer_updater).await; - log::info!("Phase 2 completed: flushed {} layer updates", flushed); - } else { - log::info!("Phase 2 completed: debouncer has no pending updates"); + debug!("Flushed {} layer updates", flushed); } - } else { - log::info!("Phase 2 skipped: debouncer not enabled"); } - // Phase 3: Wait again to ensure tasks from debouncer flush also complete + // Phase 3: Wait for debouncer flush tasks to complete loop { let pending = self.pending_tasks.load(Ordering::SeqCst); if pending == 0 { break; } - // Check if timed out if start.elapsed() >= max_wait { - log::warn!("Wait timeout, still have {} tasks not completed", pending); + warn!("Timeout, {} tasks still pending", pending); return false; } - log::info!( - "Waiting for {} post-flush tasks... 
(elapsed: {:?})", - pending, - start.elapsed() - ); tokio::time::sleep(check_interval).await; } - log::info!("Phase 3 completed: all tasks cleared"); - log::info!( - "flush_and_wait completed: all tasks and layer updates processed (elapsed: {:?})", - start.elapsed() - ); + info!("All tasks completed (elapsed: {:?})", start.elapsed()); true } @@ -412,31 +387,25 @@ impl MemoryEventCoordinator { loop { let pending = self.pending_tasks.load(Ordering::SeqCst); - // If no pending tasks, return success if pending == 0 { - // Wait a short time to ensure no new tasks just submitted tokio::time::sleep(Duration::from_millis(200)).await; let pending_after = self.pending_tasks.load(Ordering::SeqCst); if pending_after == 0 { - log::info!("All background tasks completed"); + info!("All background tasks completed"); return true; } - // New tasks submitted, continue waiting continue; } - // Check if timed out if start.elapsed() >= timeout { - log::warn!("Wait for background tasks timeout, still have {} tasks not completed", pending); + warn!("Timeout, {} tasks still pending", pending); return false; } - // Print wait log on first time if start.elapsed() < Duration::from_millis(600) { - log::info!("Waiting for {} background tasks to complete...", pending); + info!("Waiting for {} tasks...", pending); } - // Wait a short time before checking again tokio::time::sleep(check_interval).await; } } @@ -724,28 +693,17 @@ impl MemoryEventCoordinator { user_id: &str, agent_id: &str, ) -> Result<()> { - // 使用 log 以便在 tars 中可见 - log::info!( - "🔄 Processing session closed: {} (user_id={}, agent_id={})", - session_id, - user_id, - agent_id - ); - info!("Processing session closed: {}", session_id); + info!("Processing session closed: {} (user={}, agent={})", session_id, user_id, agent_id); // 1. 
Extract memories from the session let extracted = self.extract_memories_from_session(session_id).await?; - log::info!( - "🧠 Extracted memories: preferences={}, entities={}, events={}, cases={}, personal_info={}, work_history={}, relationships={}, goals={}", + info!( + "Extracted memories: {} preferences, {} entities, {} events, {} cases", extracted.preferences.len(), extracted.entities.len(), extracted.events.len(), - extracted.cases.len(), - extracted.personal_info.len(), - extracted.work_history.len(), - extracted.relationships.len(), - extracted.goals.len() + extracted.cases.len() ); // 2. Update user memories @@ -755,22 +713,12 @@ impl MemoryEventCoordinator { .update_memories(user_id, agent_id, session_id, &extracted) .await?; - log::info!( - "✅ User memory update for session {}: {} created, {} updated", - session_id, - user_result.created, - user_result.updated - ); info!( - "User memory update for session {}: {} created, {} updated", + "User memory updated for session {}: {} created, {} updated", session_id, user_result.created, user_result.updated ); - - // Note: Not calling update_all_layers here as it's a long-running operation - // that would block the event processing loop. Instead, call generate_user_agent_layers explicitly during exit flow - log::info!("Memory written, should call generate_user_agent_layers during exit to generate layer files"); } else { - log::info!("⚠️ No memories extracted from session {}", session_id); + info!("No memories extracted from session {}", session_id); } // 3. 
Update timeline layers @@ -782,7 +730,6 @@ impl MemoryEventCoordinator { let timeline_uri = format!("cortex://session/{}/timeline", session_id); self.vector_sync.sync_directory(&timeline_uri).await?; - log::info!("✅ Session {} processing complete", session_id); info!("Session {} processing complete", session_id); Ok(()) @@ -847,66 +794,43 @@ impl MemoryEventCoordinator { /// Extract memories from a session using LLM async fn extract_memories_from_session(&self, session_id: &str) -> Result { - // Collect all messages from the session let timeline_uri = format!("cortex://session/{}/timeline", session_id); - log::info!("📂 Collecting messages from: {}", timeline_uri); - let mut messages = Vec::new(); match self .collect_messages_recursive(&timeline_uri, &mut messages) .await { Ok(_) => { - log::info!("✅ Collected {} messages from session", messages.len()); + debug!("Collected {} messages from session", messages.len()); } Err(e) => { - log::error!("❌ Failed to collect messages: {}", e); + error!("Failed to collect messages: {}", e); return Err(e); } } if messages.is_empty() { - log::warn!("⚠️ No messages found in session {}", session_id); - debug!("No messages found in session {}", session_id); + warn!("No messages found in session {}", session_id); return Ok(ExtractedMemories::default()); } - // Build extraction prompt - log::info!( - "🧠 Building extraction prompt for {} messages...", - messages.len() - ); let prompt = self.build_extraction_prompt(&messages); - // Call LLM for extraction - log::info!("📞 Calling LLM for memory extraction..."); + debug!("Calling LLM for memory extraction..."); let response = match self.llm_client.complete(&prompt).await { Ok(resp) => { - log::info!("✅ LLM response received ({} chars)", resp.len()); + debug!("LLM response received ({} chars)", resp.len()); resp } Err(e) => { - log::error!("❌ LLM call failed: {}", e); + error!("LLM call failed: {}", e); return Err(e); } }; - // Parse response let extracted = 
self.parse_extraction_response(&response); - log::info!( - "🧠 Extracted memories: preferences={}, entities={}, events={}, cases={}, personal_info={}, work_history={}, relationships={}, goals={}", - extracted.preferences.len(), - extracted.entities.len(), - extracted.events.len(), - extracted.cases.len(), - extracted.personal_info.len(), - extracted.work_history.len(), - extracted.relationships.len(), - extracted.goals.len() - ); - info!( "Extracted {} memories from session {}", extracted.preferences.len() diff --git a/cortex-mem-core/src/session/manager.rs b/cortex-mem-core/src/session/manager.rs index 8c3d6e3..1060072 100644 --- a/cortex-mem-core/src/session/manager.rs +++ b/cortex-mem-core/src/session/manager.rs @@ -4,7 +4,7 @@ use crate::{CortexFilesystem, FilesystemOperations, MessageStorage, ParticipantM use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use tracing::info; +use tracing::{info, warn}; /// Session status #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] @@ -376,9 +376,8 @@ impl SessionManager { thread_id, user_id, agent_id ); } else { - // 使用 log 以便在 tars 中可见 - log::warn!( - "⚠️ memory_event_tx is None, SessionClosed event NOT sent for session {}", + warn!( + "memory_event_tx is None, SessionClosed event NOT sent for session {}", thread_id ); } diff --git a/cortex-mem-service/src/main.rs b/cortex-mem-service/src/main.rs index d2a720c..13fb3f9 100644 --- a/cortex-mem-service/src/main.rs +++ b/cortex-mem-service/src/main.rs @@ -57,7 +57,7 @@ async fn main() -> anyhow::Result<()> { .with(log_level) .init(); - info!("Starting Cortex-Mem Service V2"); + info!("Starting Cortex Memory Service"); info!("Data directory: {}", cli.data_dir); // Initialize application state diff --git a/cortex-mem-tools/Cargo.toml b/cortex-mem-tools/Cargo.toml index 4ae5e41..930c34c 100644 --- a/cortex-mem-tools/Cargo.toml +++ b/cortex-mem-tools/Cargo.toml @@ -18,7 +18,6 @@ serde_json = { workspace = true } anyhow = { workspace = 
true } thiserror = { workspace = true } tracing = { workspace = true } -log = "0.4" uuid = { workspace = true } chrono = { workspace = true } async-trait = { workspace = true } From 45b60cb11dc3367217972771ae0d737acc005ae6 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Mon, 9 Mar 2026 07:54:08 +0800 Subject: [PATCH 05/20] Update crate publishing script with workspace version support Supports workspace-managed versions in Cargo.toml and adds --auto-confirm flag. Refactors version checking to verify specific versions are published. --- scripts/publish-crates.js | 52 +++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/scripts/publish-crates.js b/scripts/publish-crates.js index d1ec7c0..38d0600 100755 --- a/scripts/publish-crates.js +++ b/scripts/publish-crates.js @@ -25,7 +25,8 @@ function colorize(text, color) { } // Crates to publish in dependency order (excluding cortex-mem-insights which is a web project) -// Order based on dependencies: config -> core -> tools -> rig -> (service, cli, mcp) -> tars +// Order based on dependencies: config -> core -> tools -> rig -> (service, cli, mcp) +// Note: cortex-mem-tars is excluded as it's an example project const CRATES_TO_PUBLISH = [ { name: 'cortex-mem-config', path: 'cortex-mem-config' }, { name: 'cortex-mem-core', path: 'cortex-mem-core' }, @@ -33,14 +34,35 @@ const CRATES_TO_PUBLISH = [ { name: 'cortex-mem-rig', path: 'cortex-mem-rig' }, { name: 'cortex-mem-service', path: 'cortex-mem-service' }, { name: 'cortex-mem-cli', path: 'cortex-mem-cli' }, - { name: 'cortex-mem-mcp', path: 'cortex-mem-mcp' }, - { name: 'cortex-mem-tars', path: 'examples/cortex-mem-tars' } + { name: 'cortex-mem-mcp', path: 'cortex-mem-mcp' } ]; -// Get version from Cargo.toml +// Get workspace version from root Cargo.toml +function getWorkspaceVersion() { + const workspaceCargoPath = path.join(PROJECT_ROOT, 'Cargo.toml'); + const content = fs.readFileSync(workspaceCargoPath, 'utf8'); + // Look for 
version in [workspace.package] section + const match = content.match(/^\[workspace\.package\][\s\S]*?^version\s*=\s*"([^"]+)"/m); + if (match) return match[1]; + // Fallback: look for standalone version + const fallbackMatch = content.match(/^version\s*=\s*"([^"]+)"/m); + return fallbackMatch ? fallbackMatch[1] : null; +} + +// Cached workspace version +const WORKSPACE_VERSION = getWorkspaceVersion(); + +// Get version from Cargo.toml (supports both standalone and workspace version) function getVersion(cratePath) { const cargoTomlPath = path.join(PROJECT_ROOT, cratePath, 'Cargo.toml'); const content = fs.readFileSync(cargoTomlPath, 'utf8'); + + // Check if using workspace version + if (/version\.workspace\s*=\s*true/.test(content)) { + return WORKSPACE_VERSION; + } + + // Look for standalone version const match = content.match(/^version\s*=\s*"([^"]+)"/m); return match ? match[1] : null; } @@ -164,14 +186,17 @@ function waitForCrateAvailability(crateName, maxWaitSeconds = 120) { }); } -// Check if crate is already published on crates.io using API -function isCratePublished(crateName) { +// Check if a specific version of a crate is already published on crates.io +function isVersionPublished(crateName, version) { try { - // Use curl to check crates.io API directly - execSync(`curl -s -f "https://crates.io/api/v1/crates/${crateName}" > /dev/null`, { - stdio: 'pipe' + // Use curl to check crates.io API and get the newest version + const result = execSync(`curl.exe -s "https://crates.io/api/v1/crates/${crateName}"`, { + stdio: 'pipe', + encoding: 'utf8' }); - return true; + const data = JSON.parse(result); + const newestVersion = data.crate?.newest_version || data.crate?.max_version; + return newestVersion === version; } catch (error) { return false; } @@ -187,6 +212,7 @@ async function main() { const dryRun = args.includes('--dry-run'); const skipWait = args.includes('--skip-wait'); const force = args.includes('--force'); // New flag to force republish + const 
autoConfirm = args.includes('--yes') || args.includes('-y'); // Auto-confirm flag if (dryRun) { console.log(colorize('\n⚠️ DRY RUN MODE - No actual publishing will occur', 'yellow')); @@ -200,7 +226,7 @@ async function main() { console.log(colorize('\n📦 Crates to publish (in dependency order):', 'blue')); CRATES_TO_PUBLISH.forEach((crate, index) => { const version = getVersion(crate.path); - const published = isCratePublished(crate.name); + const published = isVersionPublished(crate.name, version); console.log( ` ${index + 1}. ${colorize(crate.name, published ? 'yellow' : 'green')} v${version} ${published ? '(already published)' : ''}` ); @@ -209,7 +235,7 @@ async function main() { console.log(colorize('\n' + '='.repeat(60), 'cyan')); // Ask for confirmation - if (!dryRun) { + if (!dryRun && !autoConfirm) { console.log(colorize('\n⚠️ This will publish the above crates to crates.io', 'yellow')); console.log(colorize('Press Ctrl+C to cancel, or press Enter to continue...', 'yellow')); await new Promise((resolve) => { @@ -226,7 +252,7 @@ async function main() { const version = getVersion(crate.path); // Skip if already published (unless force mode) - if (!force && isCratePublished(crate.name)) { + if (!force && isVersionPublished(crate.name, version)) { console.log( colorize( `\n⏭️ [${i + 1}/${CRATES_TO_PUBLISH.length}] Skipping ${crate.name} v${version} - already published`, From 52e0244e3618f35ab74564ea1387a1466d988848 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Mon, 9 Mar 2026 08:10:20 +0800 Subject: [PATCH 06/20] Replace curl with native HTTPS for cross-platform compatibility Switch from using curl commands to native Node.js HTTPS module for checking crate availability on crates.io. This removes the dependency on curl and improves cross-platform support, especially on Windows. 
--- scripts/publish-crates.js | 47 ++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 16 deletions(-) diff --git a/scripts/publish-crates.js b/scripts/publish-crates.js index 38d0600..7b94a0b 100755 --- a/scripts/publish-crates.js +++ b/scripts/publish-crates.js @@ -156,6 +156,26 @@ function publishCrate(cratePath, dryRun = false) { } } +// Helper: Make HTTP GET request (cross-platform, no curl dependency) +function httpGet(url) { + return new Promise((resolve, reject) => { + const https = require('https'); + https + .get(url, (res) => { + let data = ''; + res.on('data', (chunk) => (data += chunk)); + res.on('end', () => { + if (res.statusCode >= 200 && res.statusCode < 300) { + resolve(data); + } else { + reject(new Error(`HTTP ${res.statusCode}`)); + } + }); + }) + .on('error', reject); + }); +} + // Wait for a crate to be available on crates.io function waitForCrateAvailability(crateName, maxWaitSeconds = 120) { console.log(colorize(` Waiting for ${crateName} to be available on crates.io...`, 'cyan')); @@ -164,12 +184,10 @@ function waitForCrateAvailability(crateName, maxWaitSeconds = 120) { const checkInterval = 5000; // Check every 5 seconds return new Promise((resolve, reject) => { - const checkAvailability = () => { + const checkAvailability = async () => { try { - // Use curl to check crates.io API directly - execSync(`curl -s -f "https://crates.io/api/v1/crates/${crateName}" > /dev/null`, { - stdio: 'pipe' - }); + // Use native HTTPS instead of curl for cross-platform compatibility + await httpGet(`https://crates.io/api/v1/crates/${crateName}`); console.log(colorize(` ✓ ${crateName} is now available on crates.io`, 'green')); resolve(); } catch (error) { @@ -186,14 +204,10 @@ function waitForCrateAvailability(crateName, maxWaitSeconds = 120) { }); } -// Check if a specific version of a crate is already published on crates.io -function isVersionPublished(crateName, version) { +// Check if a specific version of a crate is already 
published on crates.io (async) +async function isVersionPublished(crateName, version) { try { - // Use curl to check crates.io API and get the newest version - const result = execSync(`curl.exe -s "https://crates.io/api/v1/crates/${crateName}"`, { - stdio: 'pipe', - encoding: 'utf8' - }); + const result = await httpGet(`https://crates.io/api/v1/crates/${crateName}`); const data = JSON.parse(result); const newestVersion = data.crate?.newest_version || data.crate?.max_version; return newestVersion === version; @@ -224,13 +238,14 @@ async function main() { } console.log(colorize('\n📦 Crates to publish (in dependency order):', 'blue')); - CRATES_TO_PUBLISH.forEach((crate, index) => { + for (let index = 0; index < CRATES_TO_PUBLISH.length; index++) { + const crate = CRATES_TO_PUBLISH[index]; const version = getVersion(crate.path); - const published = isVersionPublished(crate.name, version); + const published = await isVersionPublished(crate.name, version); console.log( ` ${index + 1}. ${colorize(crate.name, published ? 'yellow' : 'green')} v${version} ${published ? '(already published)' : ''}` ); - }); + } console.log(colorize('\n' + '='.repeat(60), 'cyan')); @@ -252,7 +267,7 @@ async function main() { const version = getVersion(crate.path); // Skip if already published (unless force mode) - if (!force && isVersionPublished(crate.name, version)) { + if (!force && (await isVersionPublished(crate.name, version))) { console.log( colorize( `\n⏭️ [${i + 1}/${CRATES_TO_PUBLISH.length}] Skipping ${crate.name} v${version} - already published`, From baa0faf111a09edefaafa4742d0b1aff3bb9f402 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Mon, 9 Mar 2026 21:59:03 +0800 Subject: [PATCH 07/20] Add entity preservation to abstract generation and improve search capabilities The changes introduce entity preservation in abstract generation by adding a known_entities parameter to the generate_with_llm method. This ensures named entities are retained verbatim in generated abstracts. 
The abstract generation prompt was updated to handle entity preservation, and all callers were updated to pass an empty entity list by default. Additionally, the search functionality was enhanced with intent-based query analysis that supports multiple language types and query patterns. A new unified query analysis prompt was added that returns rewritten queries, keywords, entities, intent types, and time constraints in a single LLM call. The embedding client now includes built-in rate limiting and caching to improve API usage efficiency. Several configuration structs were updated to include cache and rate limiting parameters, and the embedding cache module was removed in favor of the new built-in caching. --- .../src/automation/layer_generator.rs | 2 +- cortex-mem-core/src/cascade_layer_updater.rs | 14 +- cortex-mem-core/src/embedding/cache.rs | 228 ---- cortex-mem-core/src/embedding/client.rs | 310 ++++- cortex-mem-core/src/embedding/mod.rs | 4 +- cortex-mem-core/src/layers/generator.rs | 89 +- cortex-mem-core/src/layers/manager.rs | 32 +- cortex-mem-core/src/lib.rs | 4 +- cortex-mem-core/src/llm/client.rs | 10 +- cortex-mem-core/src/llm/prompts.rs | 91 +- cortex-mem-core/src/memory_cleanup.rs | 167 +++ cortex-mem-core/src/memory_index.rs | 58 +- cortex-mem-core/src/search/mod.rs | 47 +- cortex-mem-core/src/search/vector_engine.rs | 537 ++++----- cortex-mem-core/src/search/weight_model.rs | 74 ++ cortex-mem-core/src/session/message.rs | 2 +- cortex-mem-service/src/state.rs | 2 + cortex-mem-tools/src/operations.rs | 1 + cortex-mem-tools/src/tools/storage.rs | 4 +- .../1_embedding_api_latency_optimization.md | 772 +++++++++++++ .../2_entity_query_recall_optimization.md | 1022 +++++++++++++++++ .../3_memory_forgetting_mechanism.md | 863 ++++++++++++++ docs_plan_v2.6/4_dynamic_weight_allocation.md | 929 +++++++++++++++ examples/cortex-mem-tars/src/app.rs | 202 ++-- examples/cortex-mem-tars/src/audio_input.rs | 7 +- .../src/audio_transcription.rs | 29 +- 26 files 
changed, 4686 insertions(+), 814 deletions(-) delete mode 100644 cortex-mem-core/src/embedding/cache.rs create mode 100644 cortex-mem-core/src/memory_cleanup.rs create mode 100644 cortex-mem-core/src/search/weight_model.rs create mode 100644 docs_plan_v2.6/1_embedding_api_latency_optimization.md create mode 100644 docs_plan_v2.6/2_entity_query_recall_optimization.md create mode 100644 docs_plan_v2.6/3_memory_forgetting_mechanism.md create mode 100644 docs_plan_v2.6/4_dynamic_weight_allocation.md diff --git a/cortex-mem-core/src/automation/layer_generator.rs b/cortex-mem-core/src/automation/layer_generator.rs index 5ff6a3e..0f3492d 100644 --- a/cortex-mem-core/src/automation/layer_generator.rs +++ b/cortex-mem-core/src/automation/layer_generator.rs @@ -353,7 +353,7 @@ impl LayerGenerator { // 3. Use existing AbstractGenerator to generate L0 abstract let abstract_text = self .abstract_gen - .generate_with_llm(&content, &self.llm_client) + .generate_with_llm(&content, &self.llm_client, &[]) .await?; // 4. 
Use existing OverviewGenerator to generate L1 overview diff --git a/cortex-mem-core/src/cascade_layer_updater.rs b/cortex-mem-core/src/cascade_layer_updater.rs index a0e5c8d..c74d4ec 100644 --- a/cortex-mem-core/src/cascade_layer_updater.rs +++ b/cortex-mem-core/src/cascade_layer_updater.rs @@ -244,7 +244,7 @@ impl CascadeLayerUpdater { debug!("💔 Cache MISS, generating with LLM"); let l0 = self.l0_generator - .generate_with_llm(&content, &self.llm_client) + .generate_with_llm(&content, &self.llm_client, &[]) .await?; let l1 = self.l1_generator @@ -265,7 +265,7 @@ impl CascadeLayerUpdater { } else { // No cache, generate directly let l0 = self.l0_generator - .generate_with_llm(&content, &self.llm_client) + .generate_with_llm(&content, &self.llm_client, &[]) .await?; let l1 = self.l1_generator @@ -403,7 +403,7 @@ impl CascadeLayerUpdater { debug!("💔 Cache MISS for root, generating with LLM"); let l0 = self.l0_generator - .generate_with_llm(&aggregated, &self.llm_client) + .generate_with_llm(&aggregated, &self.llm_client, &[]) .await?; let l1 = self.l1_generator @@ -422,7 +422,7 @@ impl CascadeLayerUpdater { } } else { let l0 = self.l0_generator - .generate_with_llm(&aggregated, &self.llm_client) + .generate_with_llm(&aggregated, &self.llm_client, &[]) .await?; let l1 = self.l1_generator @@ -586,7 +586,7 @@ impl CascadeLayerUpdater { // Generate L0 abstract let abstract_text = self.l0_generator - .generate_with_llm(&content, &self.llm_client) + .generate_with_llm(&content, &self.llm_client, &[]) .await?; // Generate L1 overview @@ -698,7 +698,7 @@ impl CascadeLayerUpdater { if !month_content.is_empty() { let abstract_text = self.l0_generator - .generate_with_llm(&month_content, &self.llm_client) + .generate_with_llm(&month_content, &self.llm_client, &[]) .await?; let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); @@ -730,7 +730,7 @@ impl CascadeLayerUpdater { if !day_content.is_empty() { let abstract_text = self.l0_generator - 
.generate_with_llm(&day_content, &self.llm_client) + .generate_with_llm(&day_content, &self.llm_client, &[]) .await?; let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); diff --git a/cortex-mem-core/src/embedding/cache.rs b/cortex-mem-core/src/embedding/cache.rs deleted file mode 100644 index c4d5805..0000000 --- a/cortex-mem-core/src/embedding/cache.rs +++ /dev/null @@ -1,228 +0,0 @@ -use crate::Result; -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; -use std::time::{Duration, Instant}; - -/// LRU 缓存项 -#[derive(Clone)] -struct CacheItem { - embedding: Vec, - timestamp: Instant, -} - -/// Embedding 缓存配置 -#[derive(Debug, Clone)] -pub struct CacheConfig { - /// 最大缓存条目数 - pub max_entries: usize, - /// 缓存过期时间(秒) - pub ttl_secs: u64, -} - -impl Default for CacheConfig { - fn default() -> Self { - Self { - max_entries: 10000, - ttl_secs: 3600, // 1 小时 - } - } -} - -/// Embedding 缓存层 -/// -/// 为 EmbeddingClient 提供 LRU 缓存,显著减少重复查询 -pub struct EmbeddingCache { - inner: Arc, - cache: Arc>>, - access_order: Arc>>, // LRU 访问顺序 - config: CacheConfig, -} - -impl EmbeddingCache -where - T: EmbeddingProvider + Send + Sync, -{ - pub fn new(inner: Arc, config: CacheConfig) -> Self { - Self { - inner, - cache: Arc::new(RwLock::new(HashMap::new())), - access_order: Arc::new(RwLock::new(Vec::new())), - config, - } - } - - /// 嵌入单个文本(带缓存) - pub async fn embed(&self, text: &str) -> Result> { - let key = self.compute_cache_key(text); - - // 1. 尝试从缓存读取 - { - let cache = self.cache.read().await; - if let Some(item) = cache.get(&key) { - // 检查是否过期 - if item.timestamp.elapsed() < Duration::from_secs(self.config.ttl_secs) { - // 更新 LRU 访问顺序 - self.update_access_order(&key).await; - return Ok(item.embedding.clone()); - } - } - } - - // 2. 缓存未命中,调用底层 API - let embedding = self.inner.embed(text).await?; - - // 3. 
写入缓存 - self.put_cache(key, embedding.clone()).await; - - Ok(embedding) - } - - /// 批量嵌入(带缓存) - pub async fn embed_batch(&self, texts: &[String]) -> Result>> { - let mut results = Vec::with_capacity(texts.len()); - let mut cache_misses = Vec::new(); - let mut miss_indices = Vec::new(); - - // 1. 检查缓存 - { - let cache = self.cache.read().await; - for (idx, text) in texts.iter().enumerate() { - let key = self.compute_cache_key(text); - - if let Some(item) = cache.get(&key) { - if item.timestamp.elapsed() < Duration::from_secs(self.config.ttl_secs) { - results.push(Some(item.embedding.clone())); - self.update_access_order(&key).await; - continue; - } - } - - // 缓存未命中 - results.push(None); - cache_misses.push(text.clone()); - miss_indices.push(idx); - } - } - - // 2. 批量查询未命中的项 - if !cache_misses.is_empty() { - let embeddings = self.inner.embed_batch(&cache_misses).await?; - - // 3. 写入缓存并填充结果 - for (text, embedding) in cache_misses.iter().zip(embeddings.iter()) { - let key = self.compute_cache_key(text); - self.put_cache(key, embedding.clone()).await; - } - - // 4. 填充缺失的结果 - for (miss_idx, embedding) in miss_indices.iter().zip(embeddings.iter()) { - results[*miss_idx] = Some(embedding.clone()); - } - } - - // 5. 
转换 Option> -> Vec - Ok(results.into_iter().map(|opt| opt.unwrap()).collect()) - } - - /// 计算缓存键(使用文本哈希) - fn compute_cache_key(&self, text: &str) -> String { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - - let mut hasher = DefaultHasher::new(); - text.hash(&mut hasher); - format!("{:x}", hasher.finish()) - } - - /// 写入缓存(带 LRU 淘汰) - async fn put_cache(&self, key: String, embedding: Vec) { - let mut cache = self.cache.write().await; - let mut access_order = self.access_order.write().await; - - // LRU 淘汰 - if cache.len() >= self.config.max_entries { - if let Some(oldest_key) = access_order.first().cloned() { - cache.remove(&oldest_key); - access_order.remove(0); - } - } - - // 插入新项 - cache.insert(key.clone(), CacheItem { - embedding, - timestamp: Instant::now(), - }); - - access_order.push(key); - } - - /// 更新 LRU 访问顺序 - async fn update_access_order(&self, key: &str) { - let mut access_order = self.access_order.write().await; - - if let Some(pos) = access_order.iter().position(|k| k == key) { - access_order.remove(pos); - access_order.push(key.to_string()); - } - } - - /// 获取缓存统计 - pub async fn stats(&self) -> CacheStats { - let cache = self.cache.read().await; - CacheStats { - total_entries: cache.len(), - max_entries: self.config.max_entries, - ttl_secs: self.config.ttl_secs, - } - } - - /// 清空缓存 - pub async fn clear(&self) { - let mut cache = self.cache.write().await; - let mut access_order = self.access_order.write().await; - cache.clear(); - access_order.clear(); - } -} - -/// 缓存统计 -#[derive(Debug, Clone)] -pub struct CacheStats { - pub total_entries: usize, - pub max_entries: usize, - pub ttl_secs: u64, -} - -/// Embedding 提供者 Trait -/// -/// 抽象出 embed 和 embed_batch 方法,便于缓存层包装 -#[async_trait::async_trait] -pub trait EmbeddingProvider { - async fn embed(&self, text: &str) -> Result>; - async fn embed_batch(&self, texts: &[String]) -> Result>>; -} - -// 为 EmbeddingClient 实现 EmbeddingProvider -#[async_trait::async_trait] -impl 
EmbeddingProvider for crate::embedding::EmbeddingClient { - async fn embed(&self, text: &str) -> Result> { - self.embed(text).await - } - - async fn embed_batch(&self, texts: &[String]) -> Result>> { - self.embed_batch(texts).await - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_cache_config_default() { - let config = CacheConfig::default(); - assert_eq!(config.max_entries, 10000); - assert_eq!(config.ttl_secs, 3600); - } -} diff --git a/cortex-mem-core/src/embedding/client.rs b/cortex-mem-core/src/embedding/client.rs index f2a4603..849b4d4 100644 --- a/cortex-mem-core/src/embedding/client.rs +++ b/cortex-mem-core/src/embedding/client.rs @@ -1,14 +1,147 @@ use crate::Result; use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, Instant}; +use tokio::sync::{Mutex, RwLock}; +use tracing::{debug, info, warn}; -/// Embedding configuration +/// Embedding 速率限制器 +/// +/// 实现令牌桶算法,保证单并发且每分钟不超过指定次数的 API 调用。 +/// 默认基准:30 次/分钟(即每次请求最小间隔 2000ms)。 +pub struct RateLimiter { + /// 上次请求完成的时间戳(None 表示尚未发出任何请求) + last_request_at: Mutex>, + /// 每次请求之间的最小间隔 + min_interval: Duration, +} + +impl RateLimiter { + /// 根据每分钟最大调用次数创建速率限制器 + pub fn new(calls_per_minute: u32) -> Self { + let calls = calls_per_minute.max(1) as u64; + let min_interval_ms = 60_000u64 / calls; + Self { + last_request_at: Mutex::new(None), + min_interval: Duration::from_millis(min_interval_ms), + } + } + + /// 等待直到可以安全发出下一次请求(保证单并发) + pub async fn acquire(&self) { + let mut last = self.last_request_at.lock().await; + if let Some(t) = *last { + let elapsed = t.elapsed(); + if elapsed < self.min_interval { + let wait = self.min_interval - elapsed; + debug!("Rate limiter: waiting {:?} before next request", wait); + tokio::time::sleep(wait).await; + } + } + *last = Some(Instant::now()); + } + + /// 遇到 429 时额外退避(5 秒) + pub async fn backoff_on_rate_limit(&self) { + warn!("Rate limit hit (429), backing off for 5 seconds"); + 
tokio::time::sleep(Duration::from_secs(5)).await; + } +} + +// ── 内嵌 LRU 缓存 ──────────────────────────────────────────────────────────── + +/// 缓存条目 +struct CacheItem { + embedding: Vec, + created_at: Instant, +} + +/// 内嵌 LRU 内存缓存(直接嵌入 EmbeddingClient,避免泛型包装) +struct InnerCache { + entries: HashMap, + access_order: Vec, + max_entries: usize, + ttl: Duration, +} + +impl InnerCache { + fn new(max_entries: usize, ttl_secs: u64) -> Self { + Self { + entries: HashMap::new(), + access_order: Vec::new(), + max_entries, + ttl: Duration::from_secs(ttl_secs), + } + } + + /// 查询缓存;过期则视为缺失 + fn get(&mut self, key: &str) -> Option> { + if let Some(item) = self.entries.get(key) { + if item.created_at.elapsed() < self.ttl { + // 更新 LRU 顺序 + if let Some(pos) = self.access_order.iter().position(|k| k == key) { + self.access_order.remove(pos); + self.access_order.push(key.to_string()); + } + return Some(item.embedding.clone()); + } else { + // 已过期,移除 + self.entries.remove(key); + self.access_order.retain(|k| k != key); + } + } + None + } + + /// 写入缓存(满时淘汰最旧条目) + fn put(&mut self, key: String, embedding: Vec) { + if self.entries.len() >= self.max_entries && !self.entries.contains_key(&key) { + if let Some(oldest) = self.access_order.first().cloned() { + self.entries.remove(&oldest); + self.access_order.remove(0); + } + } + self.entries.insert( + key.clone(), + CacheItem { + embedding, + created_at: Instant::now(), + }, + ); + // 将新条目放到 access_order 末尾(如已存在则先移除) + self.access_order.retain(|k| k != &key); + self.access_order.push(key); + } + + /// 计算缓存键(含模型名,防止切换模型后复用旧向量) + fn compute_key(model: &str, text: &str) -> String { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + text.hash(&mut hasher); + format!("{}:{:x}", model, hasher.finish()) + } +} + +// ── EmbeddingConfig ─────────────────────────────────────────────────────────── + +/// Embedding 客户端配置 #[derive(Debug, Clone, Serialize, Deserialize)] pub 
struct EmbeddingConfig { pub api_base_url: String, pub api_key: String, pub model_name: String, + /// 单批次最大文本数(默认 10) pub batch_size: usize, + /// HTTP 超时(秒,默认 30) pub timeout_secs: u64, + /// 每分钟最大 API 调用次数(默认 30,即单并发 2s 间隔) + pub calls_per_minute: u32, + /// 内存缓存最大条目数(默认 10000) + pub cache_max_entries: usize, + /// 内存缓存 TTL(秒,默认 3600) + pub cache_ttl_secs: u64, } impl Default for EmbeddingConfig { @@ -23,42 +156,156 @@ impl Default for EmbeddingConfig { .unwrap_or_else(|_| "text-embedding-3-small".to_string()), batch_size: 10, timeout_secs: 30, + calls_per_minute: 30, + cache_max_entries: 10_000, + cache_ttl_secs: 3_600, } } } -/// Embedding client +// ── EmbeddingClient ─────────────────────────────────────────────────────────── + +/// Embedding 客户端 +/// +/// 内置速率限制器(30 次/分钟单并发)和 LRU 内存缓存, +/// 对外 API 与原版保持一致。 pub struct EmbeddingClient { config: EmbeddingConfig, client: reqwest::Client, + rate_limiter: Arc, + cache: Arc>, } impl EmbeddingClient { - /// Create a new embedding client + /// 创建新的 EmbeddingClient pub fn new(config: EmbeddingConfig) -> Result { + let rate_limiter = Arc::new(RateLimiter::new(config.calls_per_minute)); + let cache = Arc::new(RwLock::new(InnerCache::new( + config.cache_max_entries, + config.cache_ttl_secs, + ))); let client = reqwest::Client::builder() - .timeout(std::time::Duration::from_secs(config.timeout_secs)) + .timeout(Duration::from_secs(config.timeout_secs)) .build() .map_err(|e| crate::Error::Embedding(format!("Failed to create HTTP client: {}", e)))?; - Ok(Self { config, client }) + info!( + "EmbeddingClient initialized: model={}, rate_limit={}/min, cache={}entries/{}s", + config.model_name, + config.calls_per_minute, + config.cache_max_entries, + config.cache_ttl_secs, + ); + + Ok(Self { + config, + client, + rate_limiter, + cache, + }) } - /// Embed a single text + /// 嵌入单个文本(带缓存) pub async fn embed(&self, text: &str) -> Result> { - let results = self.embed_batch(&[text.to_string()]).await?; - results + // 查缓存 + let 
cache_key = InnerCache::compute_key(&self.config.model_name, text); + { + let mut cache = self.cache.write().await; + if let Some(cached) = cache.get(&cache_key) { + debug!("Cache hit for text (len={})", text.chars().count()); + return Ok(cached); + } + } + + // 缓存未命中,调用 API(含速率控制) + let results = self.embed_batch_raw(&[text.to_string()]).await?; + let embedding = results .into_iter() .next() - .ok_or_else(|| crate::Error::Embedding("No embedding returned".to_string())) + .ok_or_else(|| crate::Error::Embedding("No embedding returned".to_string()))?; + + // 写入缓存 + { + let mut cache = self.cache.write().await; + cache.put(cache_key, embedding.clone()); + } + + Ok(embedding) } - /// Embed a batch of texts + /// 批量嵌入(自动分批 + 缓存命中跳过 API 调用) pub async fn embed_batch(&self, texts: &[String]) -> Result>> { if texts.is_empty() { return Ok(vec![]); } + let mut results: Vec>> = vec![None; texts.len()]; + let mut miss_texts: Vec = Vec::new(); + let mut miss_indices: Vec = Vec::new(); + + // 1. 批量查缓存 + { + let mut cache = self.cache.write().await; + for (idx, text) in texts.iter().enumerate() { + let key = InnerCache::compute_key(&self.config.model_name, text); + if let Some(cached) = cache.get(&key) { + results[idx] = Some(cached); + } else { + miss_texts.push(text.clone()); + miss_indices.push(idx); + } + } + } + + if miss_texts.is_empty() { + debug!("All {} embeddings served from cache", texts.len()); + return Ok(results.into_iter().map(|opt| opt.unwrap()).collect()); + } + + debug!( + "{}/{} cache misses, calling API", + miss_texts.len(), + texts.len() + ); + + // 2. 顺序分批调用 API(严格单并发) + let mut api_results: Vec> = Vec::with_capacity(miss_texts.len()); + for chunk in miss_texts.chunks(self.config.batch_size) { + let embeddings = self.embed_batch_raw(chunk).await?; + api_results.extend(embeddings); + } + + // 3. 
写入缓存并填充结果 + { + let mut cache = self.cache.write().await; + for (api_idx, (text, embedding)) in + miss_texts.iter().zip(api_results.iter()).enumerate() + { + let key = InnerCache::compute_key(&self.config.model_name, text); + cache.put(key, embedding.clone()); + let result_idx = miss_indices[api_idx]; + results[result_idx] = Some(embedding.clone()); + } + } + + Ok(results.into_iter().map(|opt| opt.unwrap()).collect()) + } + + /// 分块批量嵌入(向后兼容旧接口,内部复用 embed_batch) + pub async fn embed_batch_chunked(&self, texts: &[String]) -> Result>> { + self.embed_batch(texts).await + } + + /// 获取 embedding 维度(发一次 API 请求探测) + pub async fn dimension(&self) -> Result { + let embedding = self.embed("test").await?; + Ok(embedding.len()) + } + + // ── 私有方法 ────────────────────────────────────────────────────────────── + + /// 实际调用 Embedding API 的原始方法(含速率控制,不经过缓存) + async fn embed_batch_raw(&self, texts: &[String]) -> Result>> { #[derive(Serialize)] struct EmbeddingRequest { input: Vec, @@ -82,10 +329,16 @@ impl EmbeddingClient { let url = format!("{}/embeddings", self.config.api_base_url); + // 速率控制:等待令牌(保证单并发 + 最小间隔) + self.rate_limiter.acquire().await; + let response = self .client .post(&url) - .header("Authorization", format!("Bearer {}", self.config.api_key)) + .header( + "Authorization", + format!("Bearer {}", self.config.api_key), + ) .header("Content-Type", "application/json") .json(&request) .send() @@ -94,20 +347,25 @@ impl EmbeddingClient { if !response.status().is_success() { let status = response.status(); - let text = response.text().await.unwrap_or_default(); + let body = response.text().await.unwrap_or_default(); + + // 遇到 429 额外退避 + if status.as_u16() == 429 { + self.rate_limiter.backoff_on_rate_limit().await; + } + return Err(crate::Error::Embedding(format!( "Embedding API error ({}): {}", - status, text + status, body ))); } let embedding_response: EmbeddingResponse = response .json() .await - .map_err(|e| crate::Error::Embedding(format!("Failed to parse response: 
{}", e)))?; - - // 强制等待1秒以避免限流 - tokio::time::sleep(std::time::Duration::from_secs(1)).await; + .map_err(|e| { + crate::Error::Embedding(format!("Failed to parse response: {}", e)) + })?; Ok(embedding_response .data @@ -115,22 +373,4 @@ impl EmbeddingClient { .map(|d| d.embedding) .collect()) } - - /// Embed texts in batches - pub async fn embed_batch_chunked(&self, texts: &[String]) -> Result>> { - let mut all_embeddings = Vec::new(); - - for chunk in texts.chunks(self.config.batch_size) { - let embeddings = self.embed_batch(chunk).await?; - all_embeddings.extend(embeddings); - } - - Ok(all_embeddings) - } - - /// Get embedding dimension - pub async fn dimension(&self) -> Result { - let embedding = self.embed("test").await?; - Ok(embedding.len()) - } } diff --git a/cortex-mem-core/src/embedding/mod.rs b/cortex-mem-core/src/embedding/mod.rs index 219a2da..08f1c04 100644 --- a/cortex-mem-core/src/embedding/mod.rs +++ b/cortex-mem-core/src/embedding/mod.rs @@ -1,5 +1,3 @@ -mod cache; -mod client; // Embedding 缓存层 +mod client; -pub use cache::{CacheConfig, CacheStats, EmbeddingCache, EmbeddingProvider}; pub use client::{EmbeddingClient, EmbeddingConfig}; diff --git a/cortex-mem-core/src/layers/generator.rs b/cortex-mem-core/src/layers/generator.rs index fd379cf..ef11bdb 100644 --- a/cortex-mem-core/src/layers/generator.rs +++ b/cortex-mem-core/src/layers/generator.rs @@ -1,37 +1,57 @@ use crate::{Result, llm::LLMClient}; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use tracing::{info, debug}; +use tracing::{debug, info}; /// Abstract (L0) generator -/// +/// /// Generates a concise summary (~100 tokens) from content using LLM -/// for quick relevance checking and filtering +/// for quick relevance checking and filtering. +/// Supports entity preservation to prevent named entities from being +/// compressed away during summarization. 
pub struct AbstractGenerator; impl AbstractGenerator { pub fn new() -> Self { Self } - - /// Generate abstract from content using LLM (mandatory) - pub async fn generate_with_llm(&self, content: &str, llm: &Arc) -> Result { - info!("Generating L0 Abstract (content length: {} chars)", content.len()); - + + /// Generate abstract from content using LLM. + /// + /// `known_entities`: optional list of named entities that MUST be preserved + /// verbatim in the generated abstract if present in the content. + /// Pass an empty slice when no entity preservation is required. + pub async fn generate_with_llm( + &self, + content: &str, + llm: &Arc, + known_entities: &[String], + ) -> Result { + // 截断保护:content 最多取前 8000 个字符(abstract_generation_with_entities 内部也会截断,双重保护) + let char_count = content.chars().count(); + info!( + "Generating L0 Abstract (content: {} chars, entities: {:?})", + char_count, known_entities + ); + let system = r#"You are an expert at creating concise abstracts. Your goal is to generate summaries that capture multiple key aspects of content for quick relevance checking. Keep abstracts under 100 tokens. Prioritize breadth over depth - cover more topics briefly rather than elaborating on one. -Be direct and informative. Use compact phrasing to maximize information density."#; - - let prompt = crate::llm::prompts::Prompts::abstract_generation(content); - debug!("L0 Abstract prompt length: {} chars", prompt.len()); - +Be direct and informative. Use compact phrasing to maximize information density. 
+When asked to preserve specific named entities, include them verbatim in the abstract."#; + + let prompt = crate::llm::prompts::Prompts::abstract_generation_with_entities( + content, + known_entities, + ); + debug!("L0 Abstract prompt length: {} chars", prompt.chars().count()); + let result = llm.complete_with_system(system, &prompt).await?; - - info!("L0 Abstract generated ({} chars)", result.len()); + + info!("L0 Abstract generated ({} chars)", result.chars().count()); Ok(result) } - + /// Estimate token count (rough approximation) pub fn estimate_tokens(text: &str) -> usize { text.len() / 3 @@ -39,7 +59,7 @@ Be direct and informative. Use compact phrasing to maximize information density. } /// Overview (L1) generator -/// +/// /// Generates structured overview (~500-2000 tokens) from content using LLM pub struct OverviewGenerator; @@ -55,21 +75,32 @@ impl OverviewGenerator { pub fn new() -> Self { Self } - - /// Generate overview from content using LLM (mandatory) - pub async fn generate_with_llm(&self, content: &str, llm: &Arc) -> Result { - info!("Generating L1 Overview (content length: {} chars)", content.len()); - + + /// Generate overview from content using LLM + pub async fn generate_with_llm( + &self, + content: &str, + llm: &Arc, + ) -> Result { + // 截断保护:overview 内容最多取前 16000 个字符 + let safe_content: String = content.chars().take(16000).collect(); + + info!( + "Generating L1 Overview (content: {} chars, truncated from {})", + safe_content.chars().count(), + content.chars().count() + ); + let system = r#"You are an expert at creating structured overviews. Your goal is to provide comprehensive yet concise summaries (500-2000 tokens) that help users understand and make decisions about content. 
Use clear markdown structure with sections for Summary, Core Topics, Key Points, Entities, and Context."#; - - let prompt = crate::llm::prompts::Prompts::overview_generation(content); - debug!("L1 Overview prompt length: {} chars", prompt.len()); - + + let prompt = crate::llm::prompts::Prompts::overview_generation(&safe_content); + debug!("L1 Overview prompt length: {} chars", prompt.chars().count()); + let result = llm.complete_with_system(system, &prompt).await?; - - info!("L1 Overview generated ({} chars)", result.len()); + + info!("L1 Overview generated ({} chars)", result.chars().count()); Ok(result) } -} \ No newline at end of file +} diff --git a/cortex-mem-core/src/layers/manager.rs b/cortex-mem-core/src/layers/manager.rs index efcc05e..9b88246 100644 --- a/cortex-mem-core/src/layers/manager.rs +++ b/cortex-mem-core/src/layers/manager.rs @@ -100,21 +100,32 @@ impl LayerManager { self.filesystem.read(uri).await } - /// Generate all layers for a new memory (LLM is mandatory) - pub async fn generate_all_layers(&self, uri: &str, content: &str) -> Result<()> { + /// Generate all layers for a new memory (LLM is mandatory). + /// + /// `known_entities`: optional named entities that must be preserved in the L0 abstract. + /// Pass an empty slice when no entity preservation is required. + pub async fn generate_all_layers( + &self, + uri: &str, + content: &str, + known_entities: &[String], + ) -> Result<()> { // 1. Write L2 (detail) self.filesystem.write(uri, content).await?; - - // 2. Generate L0 abstract using LLM - let abstract_text = self.abstract_gen.generate_with_llm(content, &self.llm_client).await?; + + // 2. Generate L0 abstract using LLM (with entity preservation) + let abstract_text = self + .abstract_gen + .generate_with_llm(content, &self.llm_client, known_entities) + .await?; let abstract_uri = Self::get_layer_uri(uri, ContextLayer::L0Abstract); self.filesystem.write(&abstract_uri, &abstract_text).await?; - + // 3. 
Generate L1 overview using LLM let overview = self.overview_gen.generate_with_llm(content, &self.llm_client).await?; let overview_uri = Self::get_layer_uri(uri, ContextLayer::L1Overview); self.filesystem.write(&overview_uri, &overview).await?; - + Ok(()) } @@ -169,8 +180,11 @@ impl LayerManager { all_content.push_str("\n\n---\n\n"); } - // 3. Generate L0 abstract using LLM - let abstract_text = self.abstract_gen.generate_with_llm(&all_content, &self.llm_client).await?; + // 3. Generate L0 abstract using LLM(timeline 聚合内容,无特定实体透传) + let abstract_text = self + .abstract_gen + .generate_with_llm(&all_content, &self.llm_client, &[]) + .await?; let abstract_uri = format!("{}/.abstract.md", timeline_uri); self.filesystem.write(&abstract_uri, &abstract_text).await?; info!("Generated L0 abstract: {}", abstract_uri); diff --git a/cortex-mem-core/src/lib.rs b/cortex-mem-core/src/lib.rs index 4332eea..35e8d53 100644 --- a/cortex-mem-core/src/lib.rs +++ b/cortex-mem-core/src/lib.rs @@ -79,6 +79,7 @@ pub mod cascade_layer_debouncer; // Phase 2 optimization pub mod llm_result_cache; // Phase 3 optimization (LLM cache only) pub mod vector_sync_manager; pub mod memory_event_coordinator; +pub mod memory_cleanup; // Phase v2.6: forgetting mechanism // Re-exports pub use config::*; @@ -97,7 +98,7 @@ pub use extraction::ExtractionConfig; pub use embedding::{EmbeddingClient, EmbeddingConfig}; pub use filesystem::{CortexFilesystem, FilesystemOperations}; pub use llm::LLMClient; -pub use search::{SearchOptions, VectorSearchEngine}; +pub use search::{SearchOptions, VectorSearchEngine, SearchResult, QueryIntentType, EnhancedQueryIntent}; pub use session::{ CaseMemory, EntityMemory, EventMemory, ExtractedMemories, MemoryExtractor, Message, MessageRole, Participant, ParticipantManager, PreferenceMemory, SessionConfig, SessionManager, @@ -120,6 +121,7 @@ pub use cascade_layer_debouncer::{LayerUpdateDebouncer, DebouncerConfig}; // Ph pub use llm_result_cache::{LlmResultCache, CacheConfig, 
CacheStats}; // Phase 3 pub use vector_sync_manager::{VectorSyncManager, VectorSyncStats}; pub use memory_event_coordinator::{MemoryEventCoordinator, CoordinatorConfig}; // Phase 2 +pub use memory_cleanup::{MemoryCleanupService, MemoryCleanupConfig, CleanupStats}; // v2.6 // Session-related re-exports pub use session::message::MessageStorage; diff --git a/cortex-mem-core/src/llm/client.rs b/cortex-mem-core/src/llm/client.rs index ec64d2b..ad98de4 100644 --- a/cortex-mem-core/src/llm/client.rs +++ b/cortex-mem-core/src/llm/client.rs @@ -185,7 +185,7 @@ impl LLMClientImpl { let response: String = self.complete(prompt).await?; // Extract JSON from response (handles markdown code blocks) - let json_str = Self::extract_json_from_response(&response); + let json_str = Self::extract_json_from_response_static(&response); // Try to parse as structured response first if let Ok(extracted) = serde_json::from_str::(json_str) { @@ -238,7 +238,7 @@ impl LLMClientImpl { } /// Extract JSON from LLM response, handling markdown code blocks - fn extract_json_from_response(response: &str) -> &str { + pub fn extract_json_from_response_static(response: &str) -> &str { let trimmed = response.trim(); // If response is wrapped in ```json ... ``` or ``` ... 
``` @@ -329,7 +329,7 @@ impl LLMClient for LLMClientImpl { let response: String = self.complete(prompt).await?; // Extract JSON from response (handles markdown code blocks) - let json_str = Self::extract_json_from_response(&response); + let json_str = Self::extract_json_from_response_static(&response); // Try to parse as structured response first if let Ok(extracted) = serde_json::from_str::(json_str) { @@ -399,7 +399,7 @@ If no facts are found, return: {{"facts": []}} let response = self.complete(&extraction_prompt).await?; // Try to extract JSON from the response - let json_str = Self::extract_json_from_response(&response); + let json_str = Self::extract_json_from_response_static(&response); // Try to parse as structured facts match serde_json::from_str::(json_str) { @@ -454,7 +454,7 @@ If no facts are found, return: {{"facts": []}} let response = self.complete(&extraction_prompt).await?; // Try to extract JSON from the response - let json_str = Self::extract_json_from_response(&response); + let json_str = Self::extract_json_from_response_static(&response); // Try to parse as detailed facts match serde_json::from_str::(json_str) { diff --git a/cortex-mem-core/src/llm/prompts.rs b/cortex-mem-core/src/llm/prompts.rs index 99e3dbc..299417a 100644 --- a/cortex-mem-core/src/llm/prompts.rs +++ b/cortex-mem-core/src/llm/prompts.rs @@ -103,28 +103,87 @@ Extracted Memories (JSON):"#, ) } - /// Prompt for intent analysis in retrieval - pub fn intent_analysis(query: &str) -> String { - format!( - r#"Analyze the following query and extract: + /// 统一查询意图分析 Prompt(一次 LLM 请求返回所有检索所需信息) + /// + /// 返回:改写查询、关键词、实体列表、意图类型、时间约束 + /// 支持中英文及混合语言查询 + pub fn unified_query_analysis(query: &str) -> String { + // 截断保护:query 最多取前 500 个字符(使用 chars 保证 Unicode 安全) + let safe_query: String = query.chars().take(500).collect(); -1. **Keywords**: Important keywords for search (2-5 words) -2. **Entities**: Named entities mentioned (people, places, technologies) -3. 
**Time Range**: Any time-related constraints (if mentioned) -4. **Query Type**: The type of query (factual, procedural, conceptual, etc.) + format!( + r#"Analyze the following search query and return a JSON object with all fields filled. -Format as JSON: +## Output JSON Format {{ - "keywords": ["...", "..."], - "entities": ["...", "..."], - "time_range": {{ "start": "...", "end": "..." }}, - "query_type": "..." + "rewritten_query": "expanded query for better vector retrieval (keep original meaning, add synonyms, max 80 chars)", + "keywords": ["keyword1", "keyword2"], + "entities": ["entity1", "entity2"], + "intent_type": "entity_lookup|factual|temporal|relational|search|general", + "time_constraint": {{ "start": "...", "end": "..." }} }} -Query: {} +## Field Rules +- **rewritten_query**: Expand abbreviations and add relevant synonyms. If already clear, keep as-is. Max 80 chars. +- **keywords**: 2-5 most important search terms. Must be in the same language as the query. +- **entities**: Named entities (person names, place names, tool names, technology names). Empty array if none. +- **intent_type**: Choose ONE from: + - `entity_lookup`: Query asks about a specific named entity ("王明是谁", "who is Alice", "React框架") + - `factual`: Asking for a specific fact ("X是什么", "what is X", "how does X work") + - `temporal`: Involves time reference ("最近", "上周", "recently", "last week", "yesterday") + - `relational`: Comparison or relationship ("X vs Y", "X和Y的关系", "difference between X and Y") + - `search`: Looking to find/list content ("查找", "列出", "find", "show me", "list all") + - `general`: Everything else +- **time_constraint**: Set to `null` if no time reference in query. Otherwise fill start/end as descriptive strings. 
+ +## Query +{} + +## Response (valid JSON only, no markdown, no explanation):"#, + safe_query + ) + } -Intent Analysis (JSON):"#, - query + /// Prompt for abstract generation with optional entity preservation + /// + /// When `known_entities` is non-empty, the LLM is instructed to retain + /// those entity names in the generated abstract. + pub fn abstract_generation_with_entities(content: &str, known_entities: &[String]) -> String { + // 截断保护:content 最多取前 8000 个字符 + let safe_content: String = content.chars().take(8000).collect(); + + let entity_hint = if known_entities.is_empty() { + String::new() + } else { + format!( + "\n\nIMPORTANT: The following named entities MUST appear verbatim in the abstract \ + if they are present in the content: {}", + known_entities.join(", ") + ) + }; + + format!( + r#"Generate a concise abstract (~100 tokens maximum) for the following content. + +Requirements: +- Stay within ~100 tokens limit +- Cover MULTIPLE key aspects when content is rich (who, what, key topics, important outcomes) +- Prioritize information breadth over depth - mention more topics rather than elaborating on one +- Use compact phrasing: "discussed X, Y, and Z" instead of long explanations +- For multi-topic content: list key themes briefly rather than focusing on just one +- Use clear, direct language +- Avoid filler words and unnecessary details +- **CRITICAL: Use the SAME LANGUAGE as the input content** + - If content is in Chinese, write abstract in Chinese + - If content is in English, write abstract in English + - If content is in other languages, use that language + - Preserve the original linguistic and cultural context{} + +Content: +{} + +Abstract (max 100 tokens, in the same language as the content):"#, + entity_hint, safe_content ) } } diff --git a/cortex-mem-core/src/memory_cleanup.rs b/cortex-mem-core/src/memory_cleanup.rs new file mode 100644 index 0000000..fc0c44d --- /dev/null +++ b/cortex-mem-core/src/memory_cleanup.rs @@ -0,0 +1,167 @@ +//! 
Memory Cleanup Service +//! +//! 负责定期扫描记忆索引,根据记忆强度(Ebbinghaus 遗忘曲线) +//! 将低强度记忆归档或彻底删除,控制长程 Agent 的记忆空间膨胀。 + +use crate::{ + memory_index::{MemoryMetadata, MemoryScope}, + memory_index_manager::MemoryIndexManager, + Result, +}; +use std::sync::Arc; +use tracing::{info, warn}; + +/// 清理服务配置 +#[derive(Debug, Clone)] +pub struct MemoryCleanupConfig { + /// 清理间隔(小时) + pub interval_hours: u64, + /// 归档阈值:记忆强度低于此值时标记为归档(默认 0.1) + pub archive_threshold: f32, + /// 删除阈值:已归档且强度低于此值时彻底删除(默认 0.02) + pub delete_threshold: f32, +} + +impl Default for MemoryCleanupConfig { + fn default() -> Self { + Self { + interval_hours: 24, + archive_threshold: 0.1, + delete_threshold: 0.02, + } + } +} + +/// 单次清理结果统计 +#[derive(Debug, Clone, Default)] +pub struct CleanupStats { + /// 已归档条目数 + pub archived: usize, + /// 已删除条目数 + pub deleted: usize, + /// 检查的总条目数 + pub total_scanned: usize, +} + +/// 记忆清理服务 +/// +/// 使用方式: +/// ```rust,no_run +/// // 在 Agent 启动时创建,定期手动调用 run_cleanup,或用 tokio::spawn + interval 运行 +/// let svc = MemoryCleanupService::new(index_manager, MemoryCleanupConfig::default()); +/// let stats = svc.run_cleanup(&MemoryScope::User, "alice").await?; +/// println!("Archived: {}, Deleted: {}", stats.archived, stats.deleted); +/// ``` +pub struct MemoryCleanupService { + index_manager: Arc, + config: MemoryCleanupConfig, +} + +impl MemoryCleanupService { + pub fn new(index_manager: Arc, config: MemoryCleanupConfig) -> Self { + Self { + index_manager, + config, + } + } + + /// 对指定 scope/owner 执行一次记忆清理 + pub async fn run_cleanup( + &self, + scope: &MemoryScope, + owner_id: &str, + ) -> Result { + let mut stats = CleanupStats::default(); + + let index = self + .index_manager + .load_index(scope.clone(), owner_id.to_string()) + .await?; + let memory_ids: Vec = index.memories.keys().cloned().collect(); + stats.total_scanned = memory_ids.len(); + + let mut to_archive: Vec = Vec::new(); + let mut to_delete: Vec = Vec::new(); + + for (id, metadata) in &index.memories { + let strength = 
metadata.compute_strength(); + + if metadata.archived && strength < self.config.delete_threshold { + to_delete.push(id.clone()); + } else if !metadata.archived && strength < self.config.archive_threshold { + to_archive.push(id.clone()); + } + } + + // 先归档 + if !to_archive.is_empty() { + let mut index = self + .index_manager + .load_index(scope.clone(), owner_id.to_string()) + .await?; + for id in &to_archive { + if let Some(meta) = index.memories.get_mut(id) { + let strength = meta.compute_strength(); + info!( + "Archiving memory '{}' (strength={:.3}, key='{}')", + id, strength, meta.key + ); + meta.archived = true; + } + } + self.index_manager.save_index(&index).await?; + stats.archived = to_archive.len(); + } + + // 再删除已归档且强度极低的记忆 + if !to_delete.is_empty() { + let mut index = self + .index_manager + .load_index(scope.clone(), owner_id.to_string()) + .await?; + for id in &to_delete { + if let Some(meta) = index.memories.remove(id) { + warn!( + "Deleting archived memory '{}' (strength < {:.3}, key='{}')", + id, self.config.delete_threshold, meta.key + ); + } + } + self.index_manager.save_index(&index).await?; + stats.deleted = to_delete.len(); + } + + info!( + "Cleanup complete for {}/{}: scanned={}, archived={}, deleted={}", + scope, owner_id, stats.total_scanned, stats.archived, stats.deleted + ); + + Ok(stats) + } + + /// 批量对多个 owner 执行清理(按 scope 分组) + pub async fn run_cleanup_batch( + &self, + entries: &[(MemoryScope, String)], + ) -> Result { + let mut total = CleanupStats::default(); + for (scope, owner_id) in entries { + match self.run_cleanup(scope, owner_id).await { + Ok(stats) => { + total.total_scanned += stats.total_scanned; + total.archived += stats.archived; + total.deleted += stats.deleted; + } + Err(e) => { + warn!("Cleanup failed for {}/{}: {}", scope, owner_id, e); + } + } + } + Ok(total) + } +} + +/// 公共工具函数:直接计算某个 MemoryMetadata 的当前强度(供检索时惩罚分数使用) +pub fn compute_memory_strength(metadata: &MemoryMetadata) -> f32 { + metadata.compute_strength() 
+} diff --git a/cortex-mem-core/src/memory_index.rs b/cortex-mem-core/src/memory_index.rs index f9e7fcc..8e5abd4 100644 --- a/cortex-mem-core/src/memory_index.rs +++ b/cortex-mem-core/src/memory_index.rs @@ -84,39 +84,49 @@ impl std::fmt::Display for MemoryScope { pub struct MemoryMetadata { /// Unique memory ID pub id: String, - + /// File path relative to scope root pub file: String, - + /// Memory type pub memory_type: MemoryType, - + /// Primary key for matching (topic for preferences, name for entities, etc.) pub key: String, - + /// Content hash for change detection pub content_hash: String, - + /// Source session IDs that contributed to this memory pub source_sessions: Vec, - + /// Creation timestamp pub created_at: DateTime, - + /// Last update timestamp pub updated_at: DateTime, - + /// Last access timestamp pub last_accessed: DateTime, - + /// Access count pub access_count: u32, - + /// Confidence score (0.0 - 1.0) pub confidence: f32, - + /// Current content summary (for quick comparison) pub content_summary: String, + + // ── 遗忘机制字段 ───────────────────────────────────────────────────────── + + /// 巩固次数:被访问并复用的次数(越高衰减越慢) + #[serde(default)] + pub consolidation_count: u32, + + /// 是否已归档(归档后不参与常规检索,但保留数据供审计) + #[serde(default)] + pub archived: bool, } impl MemoryMetadata { @@ -145,6 +155,8 @@ impl MemoryMetadata { access_count: 0, confidence, content_summary, + consolidation_count: 0, + archived: false, } } @@ -161,10 +173,32 @@ impl MemoryMetadata { } } - /// Record an access + /// Record an access and update consolidation count pub fn record_access(&mut self) { self.last_accessed = Utc::now(); self.access_count += 1; + // 每 5 次访问触发一次巩固(减缓遗忘曲线) + if self.access_count % 5 == 0 { + self.consolidation_count += 1; + } + } + + /// 计算当前记忆强度(0.0-1.0) + /// + /// 基于艾宾浩斯遗忘曲线:strength = confidence * exp(-decay_days / consolidation_factor) + /// - decay_rate: 默认 0.1(10% 每天衰减) + /// - consolidation_factor: 1.0 + 0.2 * consolidation_count(巩固次数越多衰减越慢) + pub fn 
compute_strength(&self) -> f32 { + let now = Utc::now(); + let decay_days = now + .signed_duration_since(self.last_accessed) + .num_seconds() + .max(0) as f32 + / 86_400.0; + + let consolidation_factor = 1.0 + 0.2 * self.consolidation_count as f32; + let strength = self.confidence * (-0.1 * decay_days / consolidation_factor).exp(); + strength.clamp(0.0, 1.0) } } diff --git a/cortex-mem-core/src/search/mod.rs b/cortex-mem-core/src/search/mod.rs index 95e3c48..e18af41 100644 --- a/cortex-mem-core/src/search/mod.rs +++ b/cortex-mem-core/src/search/mod.rs @@ -1,3 +1,48 @@ mod vector_engine; +mod weight_model; -pub use vector_engine::{SearchOptions, SearchResult, VectorSearchEngine}; \ No newline at end of file +pub use vector_engine::{SearchOptions, SearchResult, VectorSearchEngine}; +pub use weight_model::{LayerWeights, weights_for_intent}; + +use serde::{Deserialize, Serialize}; + +/// 查询意图类型 +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum QueryIntentType { + /// 实体查询:查询特定命名实体(人名、地名、工具名等) + EntityLookup, + /// 事实性查询:询问某个具体事实 + Factual, + /// 时间性查询:涉及时间引用 + Temporal, + /// 关系性查询:比较或关联多个概念 + Relational, + /// 搜索类查询:查找/列出内容 + Search, + /// 通用查询 + General, +} + +/// 时间约束 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimeConstraint { + pub start: Option, + pub end: Option, +} + +/// 增强的查询意图分析结果(由 LLM 单次调用生成) +#[derive(Debug, Clone)] +pub struct EnhancedQueryIntent { + /// 原始查询 + pub original_query: String, + /// LLM 改写后的查询(用于 embedding) + pub rewritten_query: String, + /// 关键词列表(LLM 提取,支持中文) + pub keywords: Vec, + /// 实体列表(人名、地名、技术名词等) + pub entities: Vec, + /// 意图类型 + pub intent_type: QueryIntentType, + /// 时间约束(可选) + pub time_constraint: Option, +} diff --git a/cortex-mem-core/src/search/vector_engine.rs b/cortex-mem-core/src/search/vector_engine.rs index 7040cf5..ef28dcc 100644 --- a/cortex-mem-core/src/search/vector_engine.rs +++ b/cortex-mem-core/src/search/vector_engine.rs @@ -5,9 +5,12 @@ use crate::{ llm::LLMClient, 
vector_store::{QdrantVectorStore, VectorStore, uri_to_vector_id}, }; +use crate::llm::prompts::Prompts; +use super::{EnhancedQueryIntent, QueryIntentType, TimeConstraint}; +use super::weight_model; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; /// Search options #[derive(Debug, Clone, Serialize, Deserialize)] @@ -46,45 +49,17 @@ pub struct SearchResult { pub content: Option, } -/// Query intent analysis result -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct QueryIntent { - /// Original query - pub original_query: String, - /// Rewritten/expanded query for better retrieval - pub rewritten_query: Option, - /// Extracted keywords - pub keywords: Vec, - /// Detected intent type - pub intent_type: QueryIntentType, -} - -/// Types of query intents -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum QueryIntentType { - /// Factual question - Factual, - /// Searching for specific content - Search, - /// Comparing or relating concepts - Relational, - /// Looking for recent information - Temporal, - /// General/broad query - General, -} - /// Vector search engine with L0/L1/L2 layered search support pub struct VectorSearchEngine { qdrant: Arc, embedding: Arc, filesystem: Arc, - /// Optional LLM client for query rewriting + /// Optional LLM client for intent analysis llm_client: Option>, } impl VectorSearchEngine { - /// Create a new vector search engine + /// Create a new vector search engine (without LLM, intent analysis uses fallback) pub fn new( qdrant: Arc, embedding: Arc, @@ -98,7 +73,7 @@ impl VectorSearchEngine { } } - /// Create a new vector search engine with LLM support for query rewriting + /// Create a new vector search engine with LLM support for intent analysis pub fn with_llm( qdrant: Arc, embedding: Arc, @@ -122,8 +97,7 @@ impl VectorSearchEngine { // 1. Generate query embedding let query_vec = self.embedding.embed(query).await?; - // 2. 
Search in Qdrant - // ✅ 修复:构建包含scope的Filters + // 2. Search in Qdrant with scope filter let mut filters = crate::types::Filters::default(); if let Some(scope) = &options.root_uri { filters.uri_prefix = Some(scope.clone()); @@ -135,7 +109,7 @@ impl VectorSearchEngine { .search_with_threshold(&query_vec, &filters, options.limit, Some(options.threshold)) .await?; - // ✅ 修复:添加应用层URI前缀过滤(确保scope隔离) + // 3. Application-level URI prefix filtering (ensures scope isolation) let scope_prefix = options.root_uri.as_ref(); let scored: Vec<_> = scored .into_iter() @@ -144,14 +118,13 @@ impl VectorSearchEngine { if let Some(uri) = &result.memory.metadata.uri { return uri.starts_with(prefix); } - // 如果没有URI metadata,保守地排除(防止泄露) return false; } true }) .collect(); - // 3. Enrich results with content + // 4. Enrich results with content snippets let mut results = Vec::new(); for scored_mem in scored { let snippet = if scored_mem.memory.content.chars().count() > 200 { @@ -168,7 +141,6 @@ impl VectorSearchEngine { scored_mem.memory.content.clone() }; - // Use metadata.uri if available, otherwise fall back to id let uri = scored_mem .memory .metadata @@ -189,43 +161,37 @@ impl VectorSearchEngine { /// Layered semantic search - utilizes L0/L1/L2 three-layer architecture /// - /// This method implements a three-stage retrieval strategy: + /// Three-stage retrieval strategy: /// 1. Stage 1 (L0): Fast positioning using .abstract.md files /// 2. Stage 2 (L1): Deep exploration using .overview.md files /// 3. Stage 3 (L2): Precise matching using full message content /// - /// Combined scoring: 0.2*L0 + 0.3*L1 + 0.5*L2 + /// Dynamic scoring weights based on query intent type pub async fn layered_semantic_search( &self, query: &str, options: &SearchOptions, ) -> Result> { - // Analyze and potentially rewrite the query + // 1. 
LLM 统一意图分析(单次请求) let intent = self.analyze_intent(query).await?; - let search_query = intent.rewritten_query.as_deref().unwrap_or(query); - if intent.rewritten_query.is_some() { - info!("Query rewritten: '{}' -> '{}'", query, search_query); - } info!( - "Query intent: {:?}, keywords: {:?}", - intent.intent_type, intent.keywords + "Intent analysis: type={:?}, entities={:?}, keywords={:?}, rewritten='{}'", + intent.intent_type, intent.entities, intent.keywords, intent.rewritten_query ); - // 自适应阈值:根据查询类型动态调整 - let adaptive_threshold = Self::adaptive_l0_threshold(query, &intent.intent_type); + // 2. 用改写后的查询生成 embedding + let query_vec = self.embedding.embed(&intent.rewritten_query).await?; - // Generate query embedding once (use rewritten query if available) - let query_vec = self.embedding.embed(search_query).await?; + // 3. 根据意图类型动态调整 L0 阈值 + let adaptive_threshold = Self::adaptive_l0_threshold(&intent.intent_type); - // Stage 1: L0 fast positioning - search .abstract.md + // Stage 1: L0 fast positioning info!( "Stage 1: Scanning L0 abstract layer with threshold {}", adaptive_threshold ); let mut l0_filters = crate::types::Filters::with_layer("L0"); - - // Add URI prefix filter for scope-based searching if let Some(scope) = &options.root_uri { l0_filters.uri_prefix = Some(scope.clone()); } @@ -240,7 +206,7 @@ impl VectorSearchEngine { ) .await?; - // Apply URI prefix filter (application-level filtering for reliability) + // Application-level URI prefix filter let scope_prefix = options.root_uri.as_ref(); let l0_results: Vec<_> = l0_results .into_iter() @@ -254,22 +220,16 @@ impl VectorSearchEngine { }) .collect(); - // 增强降级检索策略 if l0_results.is_empty() { warn!( - "No L0 results found at threshold {}, trying fallback strategies", + "No L0 results at threshold {}, trying fallback", adaptive_threshold ); - // 策略1: 降低阈值重试(但不要降得太低,防止返回过多不相关结果) - let relaxed_threshold = if adaptive_threshold <= 0.4 { - 0.4 // 最低不低于0.4(余弦相似度约60度) - } else { - (adaptive_threshold - 
0.2).max(0.4) // 降低0.2,但最低0.4 - }; - + // Fallback 1: relaxed threshold + let relaxed_threshold = (adaptive_threshold - 0.2).max(0.4); info!( - "Fallback strategy 1: Retrying L0 with relaxed threshold {}", + "Fallback: retrying L0 with relaxed threshold {}", relaxed_threshold ); let relaxed_results = self @@ -296,53 +256,46 @@ impl VectorSearchEngine { if !relaxed_results.is_empty() { info!( - "Found {} results with relaxed threshold, continuing with layered search", + "Found {} results with relaxed threshold, continuing layered search", relaxed_results.len() ); - // 使用降低阈值后的结果继续L1/L2流程 - // 重新执行L1/L2阶段(代码复用,赋值给l0_results后继续) - let l0_results = relaxed_results; return self - .continue_layered_search(query, &query_vec, l0_results, options) + .continue_layered_search(&query_vec, relaxed_results, options, &intent) .await; } else { - // 策略2: 完全降级到语义搜索(跳过L0,直接全量L2检索) - warn!( - "No results even with relaxed threshold {}, falling back to full semantic search", - relaxed_threshold - ); - warn!( - "⚠️ Semantic search fallback may return less relevant results due to lack of L0/L1 guidance" - ); + // Fallback 2: full semantic search + warn!("No L0 results even with relaxed threshold, falling back to semantic search"); return self.semantic_search(query, options).await; } } - info!( - "Found {} L0 candidates after scope filtering", - l0_results.len() - ); - - self.continue_layered_search(query, &query_vec, l0_results, options) + info!("Found {} L0 candidates", l0_results.len()); + self.continue_layered_search(&query_vec, l0_results, options, &intent) .await } - /// 继续执行分层检索的L1/L2阶段 - /// - /// 这个方法被提取出来,以便在降级重试后复用 + // ── 私有方法 ────────────────────────────────────────────────────────────── + + /// L1/L2 阶段检索(从 L0 候选集出发,逐层深入) async fn continue_layered_search( &self, - query: &str, query_vec: &[f32], l0_results: Vec, options: &SearchOptions, + intent: &EnhancedQueryIntent, ) -> Result> { - // Stage 2: L1 deep exploration - search .overview.md in candidate directories + // 
动态权重:根据意图类型选择 L0/L1/L2 的贡献比例 + let weights = weight_model::weights_for_intent(&intent.intent_type); + info!( + "Layer weights: L0={:.2}, L1={:.2}, L2={:.2} (intent={:?})", + weights.l0, weights.l1, weights.l2, intent.intent_type + ); + + // Stage 2: L1 deep exploration info!("Stage 2: Exploring L1 overview layer"); let mut candidates = Vec::new(); // (dir_uri, l0_score, l1_score, is_timeline) for l0_result in l0_results { - // Get L0 file URI from metadata let l0_uri = l0_result .memory .metadata @@ -350,27 +303,19 @@ impl VectorSearchEngine { .clone() .unwrap_or_else(|| l0_result.memory.id.clone()); - // Extract directory URI from L0 file URI - // L0 file: cortex://session/xxx/timeline/.abstract.md - // Directory: cortex://session/xxx/timeline let (dir_uri, is_timeline) = Self::extract_directory_from_l0_uri(&l0_uri); - - // Generate L1 ID from directory URI (not file URI!) let l1_id = uri_to_vector_id(&dir_uri, ContextLayer::L1Overview); - // Try to get L1 layer, but don't discard if missing let l1_score = if let Ok(Some(l1_memory)) = self.qdrant.get(&l1_id).await { - Self::cosine_similarity(&query_vec, &l1_memory.embedding) + Self::cosine_similarity(query_vec, &l1_memory.embedding) } else { - // L1 not found - use L0 score as approximation (weighted lower) warn!( "L1 layer not found for {}, using L0 score as fallback", dir_uri ); - l0_result.score * 0.8 // Slightly reduce score when L1 is missing + l0_result.score * 0.8 }; - // Only add if combined threshold is likely to be met if l0_result.score >= options.threshold * 0.5 || l1_score >= options.threshold * 0.5 { candidates.push((dir_uri, l0_result.score, l1_score, is_timeline)); } @@ -378,16 +323,14 @@ impl VectorSearchEngine { info!("Found {} candidates after L1 stage", candidates.len()); - // Stage 3: L2 precise matching - search actual message content + // Stage 3: L2 precise matching info!("Stage 3: Searching L2 detail layer"); let mut final_results = Vec::new(); for (dir_uri, l0_score, l1_score, 
is_timeline) in candidates { if is_timeline { - // For timeline directories, list individual messages if let Ok(entries) = self.filesystem.list(&dir_uri).await { for entry in entries { - // Skip directories and hidden files (but allow .abstract.md and .overview.md for metadata) if entry.is_directory || !entry.name.ends_with(".md") || (entry.name.starts_with('.') @@ -400,16 +343,17 @@ impl VectorSearchEngine { let l2_id = uri_to_vector_id(&entry.uri, ContextLayer::L2Detail); if let Ok(Some(l2_memory)) = self.qdrant.get(&l2_id).await { let l2_score = - Self::cosine_similarity(&query_vec, &l2_memory.embedding); + Self::cosine_similarity(query_vec, &l2_memory.embedding); - // Combined scoring: 0.2*L0 + 0.3*L1 + 0.5*L2 - let combined_score = l0_score * 0.2 + l1_score * 0.3 + l2_score * 0.5; + // 动态权重合并分数 + let combined_score = + l0_score * weights.l0 + l1_score * weights.l1 + l2_score * weights.l2; if combined_score >= options.threshold { final_results.push(SearchResult { uri: entry.uri, score: combined_score, - snippet: Self::extract_snippet(&l2_memory.content, query), + snippet: Self::extract_snippet(&l2_memory.content, &intent.rewritten_query), content: Some(l2_memory.content), }); } @@ -417,31 +361,29 @@ impl VectorSearchEngine { } } } else { - // For non-timeline directories (user/agent memories), the L0 URI points to the actual file - // Try to get L2 content directly let l2_id = uri_to_vector_id(&dir_uri, ContextLayer::L2Detail); if let Ok(Some(l2_memory)) = self.qdrant.get(&l2_id).await { - let l2_score = Self::cosine_similarity(&query_vec, &l2_memory.embedding); - let combined_score = l0_score * 0.2 + l1_score * 0.3 + l2_score * 0.5; + let l2_score = Self::cosine_similarity(query_vec, &l2_memory.embedding); + let combined_score = + l0_score * weights.l0 + l1_score * weights.l1 + l2_score * weights.l2; if combined_score >= options.threshold { final_results.push(SearchResult { uri: dir_uri.clone(), score: combined_score, - snippet: 
Self::extract_snippet(&l2_memory.content, query), + snippet: Self::extract_snippet(&l2_memory.content, &intent.rewritten_query), content: Some(l2_memory.content), }); } } else { - // L2 not indexed, but L0/L1 were good matches - include with lower score + // L2 未索引,仅凭 L0/L1 加权降级 let combined_score = l0_score * 0.4 + l1_score * 0.6; if combined_score >= options.threshold { - // Try to read content from filesystem if let Ok(content) = self.filesystem.read(&dir_uri).await { final_results.push(SearchResult { uri: dir_uri, score: combined_score, - snippet: Self::extract_snippet(&content, query), + snippet: Self::extract_snippet(&content, &intent.rewritten_query), content: Some(content), }); } @@ -461,37 +403,158 @@ impl VectorSearchEngine { Ok(final_results) } + /// 统一意图分析(优先使用 LLM 单次调用,LLM 不可用时使用最小 fallback) + async fn analyze_intent(&self, query: &str) -> Result { + if let Some(llm) = &self.llm_client { + match self.analyze_intent_with_llm(llm.as_ref(), query).await { + Ok(intent) => return Ok(intent), + Err(e) => warn!("LLM intent analysis failed, using fallback: {}", e), + } + } + + // Fallback:LLM 不可用时的基础处理(不含规则判断,仅做基本分词) + debug!("Using fallback intent analysis (no LLM)"); + Ok(EnhancedQueryIntent { + original_query: query.to_string(), + rewritten_query: query.to_string(), + // 使用 chars 保证 Unicode 安全,过滤掉单字符词 + keywords: query + .split_whitespace() + .filter(|w| w.chars().count() > 1) + .map(|s| s.to_lowercase()) + .collect(), + entities: vec![], + intent_type: QueryIntentType::General, + time_constraint: None, + }) + } + + /// 使用 LLM 进行单次请求的统一意图分析 + async fn analyze_intent_with_llm( + &self, + llm: &dyn LLMClient, + query: &str, + ) -> Result { + let prompt = Prompts::unified_query_analysis(query); + let response = llm.complete(&prompt).await?; + + // 提取 JSON(兼容 markdown 代码块包裹) + let json_str = crate::llm::client::LLMClientImpl::extract_json_from_response_static(&response); + + let val: serde_json::Value = serde_json::from_str(json_str).map_err(|e| { + 
crate::Error::Llm(format!( + "Intent JSON parse error: {}. Response: {}", + e, json_str + )) + })?; + + let intent_type = match val["intent_type"].as_str().unwrap_or("general") { + "entity_lookup" => QueryIntentType::EntityLookup, + "factual" => QueryIntentType::Factual, + "temporal" => QueryIntentType::Temporal, + "relational" => QueryIntentType::Relational, + "search" => QueryIntentType::Search, + _ => QueryIntentType::General, + }; + + let keywords: Vec = val["keywords"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(); + + let entities: Vec = val["entities"] + .as_array() + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(String::from)) + .collect() + }) + .unwrap_or_default(); + + // 截断保护:rewritten_query 最多 200 个字符 + let rewritten: String = val["rewritten_query"] + .as_str() + .filter(|s| !s.is_empty()) + .unwrap_or(query) + .chars() + .take(200) + .collect(); + + let time_constraint = if val["time_constraint"].is_null() + || val["time_constraint"].is_object() + && val["time_constraint"]["start"].is_null() + && val["time_constraint"]["end"].is_null() + { + None + } else { + Some(TimeConstraint { + start: val["time_constraint"]["start"] + .as_str() + .map(String::from), + end: val["time_constraint"]["end"].as_str().map(String::from), + }) + }; + + Ok(EnhancedQueryIntent { + original_query: query.to_string(), + rewritten_query: rewritten, + keywords, + entities, + intent_type, + time_constraint, + }) + } + + /// 根据意图类型动态调整 L0 检索阈值 + fn adaptive_l0_threshold(intent_type: &QueryIntentType) -> f32 { + match intent_type { + // 实体查询:L0 摘要可能丢失实体,用低阈值确保候选集覆盖 + QueryIntentType::EntityLookup => { + info!("EntityLookup: using lowered L0 threshold 0.35"); + 0.35 + } + QueryIntentType::Factual => { + info!("Factual query: threshold 0.4"); + 0.4 + } + QueryIntentType::Temporal => { + info!("Temporal query: threshold 0.45"); + 0.45 + } + QueryIntentType::Search | 
QueryIntentType::Relational => { + info!("Search/Relational query: threshold 0.4"); + 0.4 + } + QueryIntentType::General => { + info!("General query: default threshold 0.5"); + 0.5 + } + } + } + /// Extract directory URI from L0 metadata URI - /// - /// Since we now store directory URI in metadata.uri during indexing, - /// this function is simplified to handle both old and new formats. - /// - /// Returns (directory_uri, is_timeline) fn extract_directory_from_l0_uri(l0_uri: &str) -> (String, bool) { - // New format: metadata.uri is already the directory URI - // e.g., "cortex://session/abc/timeline" for timeline - // e.g., "cortex://user/preferences" for user memories - - // Check if this looks like a directory URI (no file extension) let is_directory = !l0_uri.ends_with(".md") || l0_uri.contains("/.abstract.md") || l0_uri.contains("/.overview.md"); if is_directory { - // Handle .abstract.md suffix (old format or layer file path) if l0_uri.ends_with("/.abstract.md") { - let dir = &l0_uri[..l0_uri.len() - 13]; // Remove "/.abstract.md" + // 安全移除后缀(使用字节截断是安全的,因为后缀全是 ASCII) + let dir = &l0_uri[..l0_uri.len() - "/.abstract.md".len()]; return (dir.to_string(), dir.contains("/timeline")); } if l0_uri.ends_with("/.overview.md") { - let dir = &l0_uri[..l0_uri.len() - 13]; // Remove "/.overview.md" + let dir = &l0_uri[..l0_uri.len() - "/.overview.md".len()]; return (dir.to_string(), dir.contains("/timeline")); } - // Already a directory URI return (l0_uri.to_string(), l0_uri.contains("/timeline")); } - // It's a file URI, extract parent directory if let Some(pos) = l0_uri.rfind('/') { let dir = &l0_uri[..pos]; return (dir.to_string(), dir.contains("/timeline")); @@ -517,25 +580,19 @@ impl VectorSearchEngine { } } - /// Extract snippet around query match + /// Extract snippet around query match (Unicode safe, uses chars) fn extract_snippet(content: &str, query: &str) -> String { let query_lower = query.to_lowercase(); let content_lower = content.to_lowercase(); if let 
Some(byte_pos_in_lower) = content_lower.find(&query_lower) { - // Calculate character position in content_lower let char_pos = content_lower[..byte_pos_in_lower].chars().count(); - - // Since content and content_lower have the same number of characters - // (case conversion doesn't change char count), we can use the same char_pos - // to locate the position in original content let query_char_len = query.chars().count(); + let total_chars = content.chars().count(); - // Calculate start and end in char indices let start_char = char_pos.saturating_sub(100); - let end_char = (char_pos + query_char_len + 100).min(content.chars().count()); + let end_char = (char_pos + query_char_len + 100).min(total_chars); - // Extract snippet using char indices from original content let snippet: String = content .chars() .skip(start_char) @@ -548,7 +605,7 @@ impl VectorSearchEngine { snippet } } else { - // Return first 200 chars if no match + // Return first 200 chars if no match found if content.chars().count() > 200 { format!("{}...", content.chars().take(200).collect::()) } else { @@ -556,204 +613,4 @@ impl VectorSearchEngine { } } } - - /// Analyze query intent and optionally rewrite/expand the query - /// - /// If LLM client is available, uses it for intelligent query rewriting. - /// Otherwise, falls back to simple keyword extraction. 
- async fn analyze_intent(&self, query: &str) -> Result { - // Simple keyword extraction (always performed) - let keywords: Vec = query - .split_whitespace() - .filter(|w| w.len() > 2) // Filter out very short words - .map(|s| s.to_lowercase()) - .collect(); - - // Determine basic intent type from query patterns - let intent_type = Self::detect_intent_type(query); - - // If LLM client is available, attempt query rewriting - if let Some(llm) = &self.llm_client { - match self.rewrite_query_with_llm(llm.as_ref(), query).await { - Ok(rewritten) => { - return Ok(QueryIntent { - original_query: query.to_string(), - rewritten_query: Some(rewritten), - keywords, - intent_type, - }); - } - Err(e) => { - warn!("Query rewrite failed, using original query: {}", e); - } - } - } - - Ok(QueryIntent { - original_query: query.to_string(), - rewritten_query: None, - keywords, - intent_type, - }) - } - - /// Detect intent type from query patterns - fn detect_intent_type(query: &str) -> QueryIntentType { - let lower = query.to_lowercase(); - - // Temporal patterns - if lower.contains("when") - || lower.contains("recent") - || lower.contains("latest") - || lower.contains("yesterday") - || lower.contains("last") - || lower.contains("ago") - { - return QueryIntentType::Temporal; - } - - // Factual patterns - if lower.starts_with("what is") - || lower.starts_with("who is") - || lower.starts_with("how to") - || lower.starts_with("define") - { - return QueryIntentType::Factual; - } - - // Relational patterns - if lower.contains(" vs ") - || lower.contains(" versus ") - || lower.contains("compared to") - || lower.contains("difference between") - || lower.contains("related to") - || lower.contains("connected with") - { - return QueryIntentType::Relational; - } - - // Search patterns - if lower.starts_with("find") - || lower.starts_with("search") - || lower.starts_with("show me") - || lower.starts_with("list") - { - return QueryIntentType::Search; - } - - QueryIntentType::General - } - - /// 
判断查询是否可能是实体查询(人名、地名、组织名等) - /// - /// 实体查询的特征: - /// - 查询很短(通常2-4个字符/词) - /// - 不包含问句词、连接词等 - /// - 可能是人名、地名、专有名词 - fn is_likely_entity_query(query: &str) -> bool { - let trimmed = query.trim(); - let char_count = trimmed.chars().count(); - let word_count = trimmed.split_whitespace().count(); - - // 规则1: 中文人名查询(2-4个汉字,无其他内容) - if char_count >= 2 && char_count <= 4 && word_count == 1 { - let all_cjk = trimmed.chars().all(|c| { - // 检查是否为CJK(中日韩)字符 - ('\u{4E00}'..='\u{9FFF}').contains(&c) - || ('\u{3400}'..='\u{4DBF}').contains(&c) - || ('\u{F900}'..='\u{FAFF}').contains(&c) - }); - if all_cjk { - return true; - } - } - - // 规则2: 英文短查询(1-2个单词,无疑问词) - if word_count <= 2 && char_count <= 20 { - let lower = trimmed.to_lowercase(); - let question_words = [ - "what", "when", "where", "who", "why", "how", "is", "are", "do", "does", - ]; - let has_question = question_words - .iter() - .any(|w| lower.split_whitespace().any(|word| word == *w)); - - if !has_question { - // 首字母大写的可能是专有名词 - let first_char_upper = trimmed.chars().next().map_or(false, |c| c.is_uppercase()); - if first_char_upper { - return true; - } - } - } - - false - } - - /// 根据查询意图自适应计算L0阈值 - /// - /// 不同查询类型使用不同阈值: - /// - 实体查询: 0.4 (降低阈值,因为L0摘要可能丢失实体) - /// - 事实性问题: 0.4 - /// - 主题探索/一般查询: 0.5 (默认) - fn adaptive_l0_threshold(query: &str, intent_type: &QueryIntentType) -> f32 { - // 优先检查是否是实体查询 - if Self::is_likely_entity_query(query) { - info!("Detected entity query, using lowered threshold 0.4"); - return 0.4; - } - - // 根据意图类型调整 - match intent_type { - QueryIntentType::Factual => { - info!("Factual query detected, using threshold 0.4"); - 0.4 - } - QueryIntentType::Temporal => { - info!("Temporal query detected, using threshold 0.45"); - 0.45 - } - QueryIntentType::Search | QueryIntentType::Relational => { - info!("Search/Relational query, using threshold 0.4"); - 0.4 - } - QueryIntentType::General => { - info!("General query, using default threshold 0.5"); - 0.5 - } - } - } - - /// Rewrite query using LLM 
for better semantic matching - async fn rewrite_query_with_llm(&self, llm: &dyn LLMClient, query: &str) -> Result { - let prompt = format!( - r#"You are a query rewriting assistant for a semantic search system. -Your task is to rewrite the user's query to improve retrieval accuracy. - -Rules: -1. Expand abbreviations and clarify ambiguous terms -2. Add relevant synonyms or related terms that might appear in documents -3. Keep the original meaning - do NOT change the user's intent -4. If the query is already clear and specific, return it unchanged -5. Keep the rewritten query concise (max 50 words) -6. Return ONLY the rewritten query, no explanations - -Original query: {} - -Rewritten query:"#, - query - ); - - let response = llm.complete(&prompt).await?; - - // Clean up the response - let rewritten = response.trim().lines().next().unwrap_or(query).to_string(); - - // If the rewrite is too different or empty, return original - if rewritten.is_empty() || rewritten.len() > query.len() * 3 { - return Ok(query.to_string()); - } - - Ok(rewritten) - } } diff --git a/cortex-mem-core/src/search/weight_model.rs b/cortex-mem-core/src/search/weight_model.rs new file mode 100644 index 0000000..639c4ad --- /dev/null +++ b/cortex-mem-core/src/search/weight_model.rs @@ -0,0 +1,74 @@ +use super::QueryIntentType; + +/// 三层检索权重(L0 / L1 / L2) +#[derive(Debug, Clone, Copy)] +pub struct LayerWeights { + pub l0: f32, + pub l1: f32, + pub l2: f32, +} + +impl Default for LayerWeights { + fn default() -> Self { + Self { + l0: 0.2, + l1: 0.3, + l2: 0.5, + } + } +} + +impl LayerWeights { + /// 归一化权重(确保三者之和为 1.0) + pub fn normalize(self) -> Self { + let total = self.l0 + self.l1 + self.l2; + if total <= 0.0 { + return Self::default(); + } + Self { + l0: self.l0 / total, + l1: self.l1 / total, + l2: self.l2 / total, + } + } +} + +/// 根据查询意图类型返回对应的动态权重 +/// +/// 权重策略: +/// - EntityLookup:极度倾向 L2 Detail(实体信息最可能在原始内容中) +/// - Factual:偏向 L2,但 L1 也有价值 +/// - Temporal:L1 Overview 时间线归纳有优势,适当平衡 +/// - 
Relational:L1 Overview 全局结构对比能力最强 +/// - Search:L0 Abstract 宽泛定位,权重均衡 +/// - General:默认均衡权重 +pub fn weights_for_intent(intent_type: &QueryIntentType) -> LayerWeights { + match intent_type { + QueryIntentType::EntityLookup => LayerWeights { + l0: 0.1, + l1: 0.2, + l2: 0.7, + }, + QueryIntentType::Factual => LayerWeights { + l0: 0.15, + l1: 0.25, + l2: 0.6, + }, + QueryIntentType::Temporal => LayerWeights { + l0: 0.2, + l1: 0.35, + l2: 0.45, + }, + QueryIntentType::Relational => LayerWeights { + l0: 0.2, + l1: 0.5, + l2: 0.3, + }, + QueryIntentType::Search => LayerWeights { + l0: 0.35, + l1: 0.35, + l2: 0.3, + }, + QueryIntentType::General => LayerWeights::default(), + } +} diff --git a/cortex-mem-core/src/session/message.rs b/cortex-mem-core/src/session/message.rs index 1a42daf..71bda8c 100644 --- a/cortex-mem-core/src/session/message.rs +++ b/cortex-mem-core/src/session/message.rs @@ -147,7 +147,7 @@ impl MessageStorage { let thread_uri = format!("cortex://session/{}", thread_id); let content = message.to_markdown(); if let Err(e) = layer_manager - .generate_all_layers(&thread_uri, &content) + .generate_all_layers(&thread_uri, &content, &[]) .await { tracing::warn!("Failed to generate layers for thread {}: {}", thread_id, e); diff --git a/cortex-mem-service/src/state.rs b/cortex-mem-service/src/state.rs index ab99639..e02bc8c 100644 --- a/cortex-mem-service/src/state.rs +++ b/cortex-mem-service/src/state.rs @@ -159,6 +159,7 @@ impl AppState { model_name: config.embedding.model_name, batch_size: config.embedding.batch_size, timeout_secs: config.embedding.timeout_secs, + ..EmbeddingConfig::default() }; // Qdrant config @@ -216,6 +217,7 @@ impl AppState { model_name: model, batch_size: 10, timeout_secs: 30, + ..EmbeddingConfig::default() }) } else { tracing::warn!("Embedding not configured"); diff --git a/cortex-mem-tools/src/operations.rs b/cortex-mem-tools/src/operations.rs index 712c5ea..ba5286e 100644 --- a/cortex-mem-tools/src/operations.rs +++ 
b/cortex-mem-tools/src/operations.rs @@ -152,6 +152,7 @@ impl MemoryOperations { model_name: embedding_model_name.to_string(), batch_size: 10, timeout_secs: 30, + ..EmbeddingConfig::default() }; let embedding_client = Arc::new(EmbeddingClient::new(embedding_config)?); tracing::info!("Embedding client initialized"); diff --git a/cortex-mem-tools/src/tools/storage.rs b/cortex-mem-tools/src/tools/storage.rs index 6d8f508..15da387 100644 --- a/cortex-mem-tools/src/tools/storage.rs +++ b/cortex-mem-tools/src/tools/storage.rs @@ -180,7 +180,7 @@ impl MemoryOperations { } else { // Fallback: synchronous generation (should not happen in production) tracing::warn!("⚠️ memory_event_tx not available, falling back to sync generation"); - if let Err(e) = self.layer_manager.generate_all_layers(&uri, &args.content).await { + if let Err(e) = self.layer_manager.generate_all_layers(&uri, &args.content, &[]).await { tracing::warn!("Failed to generate layers for {}: {}", uri, e); } } @@ -205,7 +205,7 @@ impl MemoryOperations { tracing::debug!("📤 Sent LayerUpdateNeeded event for agent scope"); } else { tracing::warn!("⚠️ memory_event_tx not available, falling back to sync generation"); - if let Err(e) = self.layer_manager.generate_all_layers(&uri, &args.content).await { + if let Err(e) = self.layer_manager.generate_all_layers(&uri, &args.content, &[]).await { tracing::warn!("Failed to generate layers for {}: {}", uri, e); } } diff --git a/docs_plan_v2.6/1_embedding_api_latency_optimization.md b/docs_plan_v2.6/1_embedding_api_latency_optimization.md new file mode 100644 index 0000000..6cf97f5 --- /dev/null +++ b/docs_plan_v2.6/1_embedding_api_latency_optimization.md @@ -0,0 +1,772 @@ +# 嵌入 API 延迟优化技术方案 + +## 1. 问题分析 + +### 1.1 当前实现缺陷 + +**位置**: `cortex-mem-core/src/embedding/client.rs:103-105` + +```rust +// 当前实现:强制等待1秒以避免限流 +tokio::time::sleep(std::time::Duration::from_secs(1)).await; +``` + +这个固定延迟存在以下问题: + +1. **过度等待**:API 可能允许更高的请求频率,固定 1 秒延迟过于保守 +2. 
**无法应对突发流量**:批量索引 100 个文件至少需要 100 秒,用户体验极差 +3. **浪费 API 配额**:在低频使用时,等待时间完全可以被利用 +4. **不可配置**:硬编码延迟时间,无法根据不同 API 提供商调整 + +### 1.2 现有缓存层的问题 + +**位置**: `cortex-mem-core/src/embedding/cache.rs` + +虽然已有 EmbeddingCache 实现,但存在以下问题: + +1. **缓存层未集成**:缓存代码存在但未被实际使用 +2. **缓存键过于简单**:仅使用文本哈希,未考虑模型版本变化 +3. **缺少持久化**:重启后缓存丢失,需要重新请求 API +4. **缺少统计信息**:无法监控命中率和效果 + +### 1.3 批处理效率问题 + +当前 `embed_batch_chunked` 方法是顺序执行的: + +```rust +pub async fn embed_batch_chunked(&self, texts: &[String]) -> Result>> { + for chunk in texts.chunks(self.config.batch_size) { + let embeddings = self.embed_batch(chunk).await?; // 顺序执行 + all_embeddings.extend(embeddings); + } +} +``` + +## 2. 解决方案 + +### 2.1 架构设计 + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 优化后的嵌入生成架构 │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ 用户请求 │───►│ 持久化缓存 │───►│ 自适应限流器 │───►│ 批量并发 │ │ +│ │ │ │ (磁盘+内存) │ │ │ │ 调度器 │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌──────────────────────────────────────────────────┐ │ +│ │ 嵌入 API │ │ +│ └──────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### 2.2 自适应速率限制器 + +#### 2.2.1 令牌桶算法实现 + +```rust +// 文件: cortex-mem-core/src/embedding/rate_limiter.rs (新建) + +use std::sync::Arc; +use tokio::sync::Mutex; +use std::time::{Duration, Instant}; + +/// 速率限制配置 +#[derive(Debug, Clone)] +pub struct RateLimitConfig { + /// 令牌桶容量(最大突发请求数) + pub max_burst: u32, + /// 令牌补充速率(每秒补充的令牌数) + pub tokens_per_second: f32, + /// 最小等待时间(毫秒),避免过于频繁的请求 + pub min_wait_ms: u64, + /// 最大等待时间(毫秒),超过此时间报错 + pub max_wait_ms: u64, +} + +impl Default for RateLimitConfig { + fn default() -> Self { + Self { + max_burst: 10, // 允许突发 10 个请求 + tokens_per_second: 2.0, // 每秒 2 个请求(根据 API 调整) + min_wait_ms: 50, // 最小间隔 50ms + 
max_wait_ms: 30000, // 最多等待 30 秒 + } + } +} + +/// OpenAI API 配置 +impl RateLimitConfig { + pub fn openai() -> Self { + Self { + max_burst: 20, + tokens_per_second: 5.0, // OpenAI 允许较高频率 + min_wait_ms: 20, + max_wait_ms: 60000, + } + } + + /// Azure OpenAI 配置 + pub fn azure_openai() -> Self { + Self { + max_burst: 10, + tokens_per_second: 3.0, + min_wait_ms: 50, + max_wait_ms: 60000, + } + } + + /// 本地模型配置(无限制) + pub fn local() -> Self { + Self { + max_burst: 1000, + tokens_per_second: 1000.0, + min_wait_ms: 0, + max_wait_ms: 0, + } + } +} + +/// 自适应速率限制器 +pub struct AdaptiveRateLimiter { + config: RateLimitConfig, + state: Arc>, +} + +struct RateLimiterState { + tokens: f32, + last_refill: Instant, + /// 自适应调整:记录最近的错误 + recent_errors: u32, + /// 自适应调整:当前退避倍数 + backoff_multiplier: f32, +} + +impl AdaptiveRateLimiter { + pub fn new(config: RateLimitConfig) -> Self { + Self { + config, + state: Arc::new(Mutex::new(RateLimiterState { + tokens: config.max_burst as f32, + last_refill: Instant::now(), + recent_errors: 0, + backoff_multiplier: 1.0, + })), + } + } + + /// 获取一个令牌(阻塞直到可用) + pub async fn acquire(&self) -> Result<(), crate::Error> { + let wait_duration = self.try_acquire().await?; + + if let Some(duration) = wait_duration { + if duration.as_millis() as u64 > self.config.max_wait_ms { + return Err(crate::Error::Embedding( + "Rate limit exceeded: max wait time reached".to_string() + )); + } + tokio::time::sleep(duration).await; + } + + // 确保最小间隔 + if self.config.min_wait_ms > 0 { + tokio::time::sleep(Duration::from_millis(self.config.min_wait_ms)).await; + } + + Ok(()) + } + + async fn try_acquire(&self) -> Result, crate::Error> { + let mut state = self.state.lock().await; + + // 补充令牌 + let now = Instant::now(); + let elapsed = now.duration_since(state.last_refill).as_secs_f32(); + let tokens_to_add = elapsed * self.config.tokens_per_second / state.backoff_multiplier; + state.tokens = (state.tokens + tokens_to_add).min(self.config.max_burst as f32); + 
state.last_refill = now; + + if state.tokens >= 1.0 { + state.tokens -= 1.0; + Ok(None) + } else { + // 计算需要等待的时间 + let tokens_needed = 1.0 - state.tokens; + let wait_secs = tokens_needed * state.backoff_multiplier / self.config.tokens_per_second; + Ok(Some(Duration::from_secs_f32(wait_secs))) + } + } + + /// 报告错误(用于自适应调整) + pub async fn report_error(&self, is_rate_limit_error: bool) { + let mut state = self.state.lock().await; + + if is_rate_limit_error { + state.recent_errors += 1; + // 指数退避 + state.backoff_multiplier = (state.backoff_multiplier * 1.5).min(10.0); + tracing::warn!( + "Rate limit error detected, increasing backoff to {:.2}x", + state.backoff_multiplier + ); + } + } + + /// 报告成功(恢复正常速率) + pub async fn report_success(&self) { + let mut state = self.state.lock().await; + state.recent_errors = 0; + // 逐渐恢复 + state.backoff_multiplier = (state.backoff_multiplier * 0.9).max(1.0); + } + + /// 获取当前状态 + pub async fn status(&self) -> RateLimiterStatus { + let state = self.state.lock().await; + RateLimiterStatus { + available_tokens: state.tokens, + backoff_multiplier: state.backoff_multiplier, + recent_errors: state.recent_errors, + } + } +} + +#[derive(Debug, Clone)] +pub struct RateLimiterStatus { + pub available_tokens: f32, + pub backoff_multiplier: f32, + pub recent_errors: u32, +} +``` + +### 2.3 持久化缓存层 + +#### 2.3.1 缓存存储设计 + +```rust +// 文件: cortex-mem-core/src/embedding/persistent_cache.rs (新建) + +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use std::collections::HashMap; +use tokio::sync::RwLock; +use std::time::{Duration, Instant}; + +/// 缓存条目 +#[derive(Debug, Clone, Serialize, Deserialize)] +struct CacheEntry { + /// 嵌入向量 + embedding: Vec, + /// 模型名称(用于模型升级时失效) + model_name: String, + /// 模型版本(如果 API 提供) + model_version: Option, + /// 创建时间戳 + created_at: i64, + /// 最后访问时间戳 + last_accessed: i64, + /// 访问次数 + access_count: u32, + /// 文本哈希 + text_hash: String, +} + +/// 持久化缓存配置 +#[derive(Debug, Clone)] +pub struct 
PersistentCacheConfig { + /// 缓存目录 + pub cache_dir: PathBuf, + /// 内存中保留的热点条目数 + pub hot_cache_size: usize, + /// TTL(秒) + pub ttl_secs: u64, + /// 是否启用磁盘持久化 + pub persist_to_disk: bool, +} + +impl Default for PersistentCacheConfig { + fn default() -> Self { + Self { + cache_dir: PathBuf::from("./.embedding_cache"), + hot_cache_size: 10000, + ttl_secs: 86400 * 7, // 7 天 + persist_to_disk: true, + } + } +} + +/// 两级缓存:内存(热点)+ 磁盘(持久化) +pub struct PersistentEmbeddingCache { + config: PersistentCacheConfig, + model_name: String, + /// 内存中的热点缓存(LRU) + hot_cache: Arc>>, + /// 访问顺序(LRU 实现) + access_order: Arc>>, + /// 统计信息 + stats: Arc>, +} + +#[derive(Debug, Clone, Default)] +pub struct CacheStats { + pub total_requests: u64, + pub hot_hits: u64, + pub disk_hits: u64, + pub misses: u64, + pub evictions: u64, +} + +impl PersistentEmbeddingCache { + pub async fn new(config: PersistentCacheConfig, model_name: String) -> Result { + let cache = Self { + config, + model_name, + hot_cache: Arc::new(RwLock::new(HashMap::new())), + access_order: Arc::new(RwLock::new(Vec::new())), + stats: Arc::new(RwLock::new(CacheStats::default())), + }; + + // 从磁盘加载缓存 + if cache.config.persist_to_disk { + cache.load_from_disk().await?; + } + + Ok(cache) + } + + /// 生成缓存键(包含模型信息) + fn make_key(&self, text: &str) -> String { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + + let mut hasher = DefaultHasher::new(); + text.hash(&mut hasher); + self.model_name.hash(&mut hasher); + + format!("{}:{:016x}", self.model_name, hasher.finish()) + } + + /// 获取嵌入(优先从缓存) + pub async fn get(&self, text: &str) -> Option> { + let key = self.make_key(text); + + // 统计请求 + { + let mut stats = self.stats.write().await; + stats.total_requests += 1; + } + + // 1. 
先查内存缓存 + { + let cache = self.hot_cache.read().await; + if let Some(entry) = cache.get(&key) { + if self.is_valid(entry) { + // 更新访问信息 + self.record_access(&key).await; + + let mut stats = self.stats.write().await; + stats.hot_hits += 1; + + return Some(entry.embedding.clone()); + } + } + } + + // 2. 查磁盘缓存 + if self.config.persist_to_disk { + if let Ok(Some(entry)) = self.load_from_disk_entry(&key).await { + if self.is_valid(&entry) { + // 提升到内存缓存 + self.put_hot_cache(key.clone(), entry.clone()).await; + + let mut stats = self.stats.write().await; + stats.disk_hits += 1; + + return Some(entry.embedding); + } + } + } + + // 3. 缓存未命中 + { + let mut stats = self.stats.write().await; + stats.misses += 1; + } + + None + } + + /// 存入缓存 + pub async fn put(&self, text: &str, embedding: Vec) { + let key = self.make_key(text); + let text_hash = self.hash_text(text); + + let entry = CacheEntry { + embedding, + model_name: self.model_name.clone(), + model_version: None, + created_at: chrono::Utc::now().timestamp(), + last_accessed: chrono::Utc::now().timestamp(), + access_count: 1, + text_hash, + }; + + // 存入内存缓存 + self.put_hot_cache(key.clone(), entry.clone()).await; + + // 持久化到磁盘 + if self.config.persist_to_disk { + let _ = self.save_to_disk_entry(&key, &entry).await; + } + } + + /// 批量获取 + pub async fn get_batch(&self, texts: &[String]) -> Vec>> { + let mut results = Vec::with_capacity(texts.len()); + for text in texts { + results.push(self.get(text).await); + } + results + } + + /// 批量存储 + pub async fn put_batch(&self, texts: &[String], embeddings: Vec>) { + for (text, embedding) in texts.iter().zip(embeddings.into_iter()) { + self.put(text, embedding).await; + } + } + + /// 获取命中率 + pub async fn hit_rate(&self) -> f32 { + let stats = self.stats.read().await; + if stats.total_requests == 0 { + return 0.0; + } + (stats.hot_hits + stats.disk_hits) as f32 / stats.total_requests as f32 + } + + /// 清理过期缓存 + pub async fn cleanup_expired(&self) -> usize { + let mut cleaned = 0; + + 
// 清理内存缓存 + { + let mut cache = self.hot_cache.write().await; + let now = chrono::Utc::now().timestamp(); + let ttl = self.config.ttl_secs as i64; + + cache.retain(|_, entry| { + let valid = now - entry.created_at < ttl; + if !valid { + cleaned += 1; + } + valid + }); + } + + // 清理磁盘缓存 + if self.config.persist_to_disk { + cleaned += self.cleanup_disk_cache().await.unwrap_or(0); + } + + cleaned + } + + // ... 其他私有方法实现 + async fn is_valid(&self, entry: &CacheEntry) -> bool { + let now = chrono::Utc::now().timestamp(); + now - entry.created_at < self.config.ttl_secs as i64 + && entry.model_name == self.model_name + } + + async fn put_hot_cache(&self, key: String, entry: CacheEntry) { + let mut cache = self.hot_cache.write().await; + let mut order = self.access_order.write().await; + + // LRU 淘汰 + while cache.len() >= self.config.hot_cache_size { + if let Some(oldest) = order.first().cloned() { + cache.remove(&oldest); + order.remove(0); + + let mut stats = self.stats.write().await; + stats.evictions += 1; + } + } + + cache.insert(key.clone(), entry); + order.push(key); + } + + async fn record_access(&self, key: &str) { + let mut order = self.access_order.write().await; + if let Some(pos) = order.iter().position(|k| k == key) { + order.remove(pos); + order.push(key.to_string()); + } + } + + fn hash_text(&self, text: &str) -> String { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + let mut hasher = DefaultHasher::new(); + text.hash(&mut hasher); + format!("{:016x}", hasher.finish()) + } + + async fn load_from_disk(&self) -> Result<(), crate::Error> { + // 实现:从磁盘加载缓存索引 + Ok(()) + } + + async fn load_from_disk_entry(&self, key: &str) -> Result, crate::Error> { + // 实现:从磁盘加载单个条目 + Ok(None) + } + + async fn save_to_disk_entry(&self, key: &str, entry: &CacheEntry) -> Result<(), crate::Error> { + // 实现:保存单个条目到磁盘 + Ok(()) + } + + async fn cleanup_disk_cache(&self) -> Result { + // 实现:清理磁盘上的过期缓存 + Ok(0) + } +} +``` + +### 2.4 集成改进后的 
EmbeddingClient + +```rust +// 文件: cortex-mem-core/src/embedding/client.rs (修改) + +use crate::embedding::{AdaptiveRateLimiter, PersistentEmbeddingCache, RateLimitConfig}; +use std::sync::Arc; + +/// 增强的嵌入客户端 +pub struct EmbeddingClientV2 { + config: EmbeddingConfig, + http_client: reqwest::Client, + rate_limiter: AdaptiveRateLimiter, + cache: Option>, +} + +impl EmbeddingClientV2 { + pub fn new(config: EmbeddingConfig) -> Result { + let http_client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(config.timeout_secs)) + .build() + .map_err(|e| crate::Error::Embedding(format!("Failed to create HTTP client: {}", e)))?; + + // 根据配置选择速率限制策略 + let rate_limit_config = match config.rate_limit_preset.as_str() { + "openai" => RateLimitConfig::openai(), + "azure" => RateLimitConfig::azure_openai(), + "local" => RateLimitConfig::local(), + _ => RateLimitConfig::default(), + }; + + Ok(Self { + config, + http_client, + rate_limiter: AdaptiveRateLimiter::new(rate_limit_config), + cache: None, // 延迟初始化 + }) + } + + /// 启用持久化缓存 + pub async fn with_cache(mut self, cache_dir: impl Into) -> Result { + let cache_config = PersistentCacheConfig { + cache_dir: cache_dir.into(), + ..Default::default() + }; + + self.cache = Some(Arc::new( + PersistentEmbeddingCache::new(cache_config, self.config.model_name.clone()).await? + )); + + Ok(self) + } + + /// 嵌入单个文本 + pub async fn embed(&self, text: &str) -> Result> { + // 1. 尝试从缓存获取 + if let Some(cache) = &self.cache { + if let Some(embedding) = cache.get(text).await { + tracing::debug!("Embedding cache hit"); + return Ok(embedding); + } + } + + // 2. 获取速率限制许可 + self.rate_limiter.acquire().await?; + + // 3. 调用 API + let embedding = self.call_api(&[text.to_string()]).await? + .into_iter() + .next() + .ok_or_else(|| crate::Error::Embedding("No embedding returned".to_string()))?; + + // 4. 存入缓存 + if let Some(cache) = &self.cache { + cache.put(text, embedding.clone()).await; + } + + // 5. 
报告成功(用于自适应调整) + self.rate_limiter.report_success().await; + + Ok(embedding) + } + + /// 批量嵌入(智能调度) + pub async fn embed_batch_smart(&self, texts: &[String]) -> Result>> { + if texts.is_empty() { + return Ok(vec![]); + } + + // 1. 批量检查缓存 + let mut results: Vec>> = vec![None; texts.len()]; + let mut cache_miss_indices = Vec::new(); + let mut cache_miss_texts = Vec::new(); + + if let Some(cache) = &self.cache { + let cached = cache.get_batch(texts).await; + for (i, result) in cached.into_iter().enumerate() { + if let Some(embedding) = result { + results[i] = Some(embedding); + } else { + cache_miss_indices.push(i); + cache_miss_texts.push(texts[i].clone()); + } + } + } else { + cache_miss_indices = (0..texts.len()).collect(); + cache_miss_texts = texts.to_vec(); + } + + // 2. 批量处理未命中的请求 + if !cache_miss_texts.is_empty() { + let mut all_embeddings = Vec::new(); + + // 并发批处理(根据速率限制调整并发度) + let concurrent_batches = self.calculate_concurrent_batches().await; + for chunk in cache_miss_texts.chunks(self.config.batch_size * concurrent_batches) { + // 获取批量许可 + for _ in 0..(chunk.len().div_ceil(self.config.batch_size)) { + self.rate_limiter.acquire().await?; + } + + // 并发调用 API + let embeddings = self.call_api(chunk).await?; + all_embeddings.extend(embeddings); + } + + // 3. 
存入缓存并填充结果 + if let Some(cache) = &self.cache { + cache.put_batch(&cache_miss_texts, all_embeddings.clone()).await; + } + + for (idx, embedding) in cache_miss_indices.into_iter().zip(all_embeddings.into_iter()) { + results[idx] = Some(embedding); + } + } + + Ok(results.into_iter().map(|opt| opt.unwrap()).collect()) + } + + /// 计算可以并发执行的批次数 + async fn calculate_concurrent_batches(&self) -> usize { + let status = self.rate_limiter.status().await; + // 根据可用令牌和退避倍数计算并发度 + let base_concurrent = (status.available_tokens as usize).max(1); + let adjusted = (base_concurrent as f32 / status.backoff_multiplier) as usize; + adjusted.max(1).min(4) // 限制最大并发为 4 + } + + async fn call_api(&self, texts: &[String]) -> Result>> { + // ... 原有的 API 调用逻辑,移除固定延迟 + // ... 错误处理时调用 self.rate_limiter.report_error(true).await + } +} +``` + +### 2.5 配置扩展 + +```toml +# config.toml 新增配置项 + +[embedding] +api_base_url = "https://api.openai.com/v1" +api_key = "${OPENAI_API_KEY}" +model_name = "text-embedding-3-small" +batch_size = 32 +timeout_secs = 30 + +# 新增:速率限制预设 +rate_limit_preset = "openai" # openai, azure, local, custom + +# 新增:缓存配置 +[embedding.cache] +enabled = true +cache_dir = "./.embedding_cache" +hot_cache_size = 10000 +ttl_days = 7 +persist_to_disk = true + +# 新增:自定义速率限制(当 preset = "custom" 时使用) +[embedding.rate_limit] +max_burst = 10 +tokens_per_second = 2.0 +min_wait_ms = 50 +max_wait_ms = 30000 +``` + +## 3. 实现计划 + +### 3.1 文件结构 + +``` +cortex-mem-core/src/embedding/ +├── mod.rs # 模块导出 +├── client.rs # 主客户端(修改) +├── cache.rs # 内存缓存(保留,简化) +├── persistent_cache.rs # 持久化缓存(新建) +└── rate_limiter.rs # 自适应速率限制(新建) +``` + +### 3.2 实现步骤 + +| 步骤 | 任务 | 预期效果 | +|------|------|----------| +| 1 | 实现自适应速率限制器 | 移除固定延迟,提升 30-50% 吞吐量 | +| 2 | 实现持久化缓存 | 减少 60-80% 重复 API 调用 | +| 3 | 重构 EmbeddingClient | 集成缓存和速率限制 | +| 4 | 添加配置支持 | 允许用户自定义策略 | +| 5 | 添加监控指标 | 支持性能调优 | + +### 3.3 测试要点 + +1. **单元测试**:速率限制器的令牌补充和消耗逻辑 +2. **集成测试**:缓存命中率、API 调用减少率 +3. **压力测试**:批量索引 1000 个文件的耗时对比 +4. 
**恢复测试**:模拟 429 错误时的自动退避 + +## 4. 预期收益 + +| 指标 | 当前 | 优化后 | 提升 | +|------|------|--------|------| +| 批量索引 100 文件 | ~100s | ~20-30s | 70%+ | +| 重复查询延迟 | 1-2s | <10ms | 99%+ | +| API 调用次数(有缓存) | 100% | 20-40% | 60-80% | +| 429 错误恢复时间 | 手动 | 自动 | N/A | + +## 5. 兼容性说明 + +1. **向后兼容**:新配置项有默认值,旧配置文件无需修改 +2. **渐进式启用**:缓存默认启用,可通过配置关闭 +3. **平滑迁移**:保留原有 `embed_batch` 方法签名,新增 `embed_batch_smart` diff --git a/docs_plan_v2.6/2_entity_query_recall_optimization.md b/docs_plan_v2.6/2_entity_query_recall_optimization.md new file mode 100644 index 0000000..f1327fe --- /dev/null +++ b/docs_plan_v2.6/2_entity_query_recall_optimization.md @@ -0,0 +1,1022 @@ +# 实体查询召回优化技术方案 + +## 1. 问题分析 + +### 1.1 L0 抽象层信息丢失问题 + +**位置**: `cortex-mem-core/src/layers/generator.rs` + +L0 抽象层设计为 ~100 tokens 的精简摘要,用于快速筛选候选。但在压缩过程中丢失了关键实体信息: + +```rust +// 当前 L0 prompt 要求 +"Cover MULTIPLE key aspects when content is rich (who, what, key topics, important outcomes)" +"Prioritize information breadth over depth" +``` + +**问题表现**: +- 用户查询"王明是谁"时,L0 摘要可能只写了"讨论了项目进展",丢失了具体人名 +- 导致 L0 层召回失败,需要降级到更低的阈值或全量 L2 检索 +- 影响检索效率和精度 + +### 1.2 Intent 分析实现过于简陋 + +**位置**: `cortex-mem-core/src/search/vector_engine.rs:600-644` + +当前 `detect_intent_type` 方法使用硬编码的关键词匹配: + +```rust +fn detect_intent_type(query: &str) -> QueryIntentType { + let lower = query.to_lowercase(); + + // 硬编码的关键词匹配 + if lower.contains("when") || lower.contains("recent") || ... { + return QueryIntentType::Temporal; + } + if lower.starts_with("what is") || lower.starts_with("who is") || ... { + return QueryIntentType::Factual; + } + // ... + QueryIntentType::General +} +``` + +**问题分析**: +1. **中文支持差**:只匹配英文关键词,中文查询几乎全部归类为 General +2. **语义理解缺失**:"告诉我上次我们讨论的技术方案" 无法被识别为 Temporal +3. 
**实体查询判断粗糙**:`is_likely_entity_query` 仅通过长度和字符类型判断 + +### 1.3 已有资源未被利用 + +**位置**: `cortex-mem-core/src/llm/prompts.rs:106-126` + +存在一个完整的 `intent_analysis` prompt,但从未被调用: + +```rust +/// Prompt for intent analysis in retrieval +pub fn intent_analysis(query: &str) -> String { + format!( + r#"Analyze the following query and extract: +1. **Keywords**: Important keywords for search (2-5 words) +2. **Entities**: Named entities mentioned (people, places, technologies) +3. **Time Range**: Any time-related constraints (if mentioned) +4. **Query Type**: The type of query (factual, procedural, conceptual, etc.) +... +``` + +**技术债务**:已有完善的 LLM intent 分析 prompt,却用简陋的关键词匹配替代。 + +### 1.4 实体识别与召回链路断裂 + +当前流程中,实体信息仅在记忆提取时识别,但在召回时未利用: + +``` +记忆存储时:识别实体 → 存入 metadata.entities +记忆召回时:查询 → 嵌入 → 向量相似度 → 返回结果 + ↑ + 未利用实体信息 +``` + +## 2. 解决方案 + +### 2.1 架构设计 + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 实体感知召回架构 │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ 用户查询 │───►│ LLM Intent │───►│ 实体增强 │───►│ 混合检索 │ │ +│ │ │ │ 分析器 │ │ 查询扩展 │ │ 策略选择 │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌──────────────────────────────────────────────────┐ │ +│ │ 检索执行层 │ │ +│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │ +│ │ │实体过滤 │ │三层级联 │ │语义搜索 │ │ │ +│ │ │召回 │ │召回 │ │召回 │ │ │ +│ │ └─────────┘ └─────────┘ └─────────┘ │ │ +│ └──────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### 2.2 LLM Intent 分析器重构 + +#### 2.2.1 增强 Intent 分析结果结构 + +```rust +// 文件: cortex-mem-core/src/search/query_intent.rs (新建或重构) + +use serde::{Deserialize, Serialize}; + +/// 增强的查询意图分析结果 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnhancedQueryIntent { + /// 原始查询 + pub original_query: String, + + /// 重写后的查询(用于语义搜索) + 
pub rewritten_query: Option, + + /// 查询类型 + pub query_type: QueryType, + + /// 提取的关键词 + pub keywords: Vec, + + /// 提取的命名实体 + pub entities: ExtractedEntities, + + /// 时间约束 + pub time_constraint: Option, + + /// 检索策略建议 + pub retrieval_strategy: RetrievalStrategy, + + /// 置信度 + pub confidence: f32, +} + +/// 查询类型(扩展版) +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum QueryType { + /// 事实查询:"Python 的创始人是谁?" + Factual, + /// 实体查询:"王明"、"那个项目" + EntityLookup, + /// 时间查询:"上周讨论了什么?" + Temporal, + /// 关系查询:"张三和李四是什么关系?" + Relational, + /// 过程查询:"如何配置环境?" + Procedural, + /// 概念查询:"什么是 RAG?" + Conceptual, + /// 搜索查询:"找一下关于微服务的讨论" + Search, + /// 比较/决策查询:"方案A和方案B哪个更好?" + Comparative, + /// 一般查询 + General, +} + +/// 提取的实体信息 +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ExtractedEntities { + /// 人名 + pub people: Vec, + /// 组织/公司 + pub organizations: Vec, + /// 项目/产品名 + pub projects: Vec, + /// 技术术语 + pub technologies: Vec, + /// 地点 + pub locations: Vec, + /// 其他实体 + pub other: Vec, +} + +impl ExtractedEntities { + pub fn is_empty(&self) -> bool { + self.people.is_empty() + && self.organizations.is_empty() + && self.projects.is_empty() + && self.technologies.is_empty() + && self.locations.is_empty() + && self.other.is_empty() + } + + pub fn all_entities(&self) -> Vec<&String> { + let mut entities = Vec::new(); + entities.extend(&self.people); + entities.extend(&self.organizations); + entities.extend(&self.projects); + entities.extend(&self.technologies); + entities.extend(&self.locations); + entities.extend(&self.other); + entities + } +} + +/// 时间约束 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TimeConstraint { + /// 时间表达式原文 + pub expression: String, + /// 解析后的起始时间 + pub start: Option>, + /// 解析后的结束时间 + pub end: Option>, + /// 相对时间描述 + pub relative: Option, // "last_week", "yesterday", "recent" +} + +/// 检索策略建议 +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum RetrievalStrategy { + /// 三层级联检索(默认) + 
LayeredCascade, + /// 实体优先检索 + EntityFirst, + /// 时间范围过滤优先 + TimeFiltered, + /// 全量语义检索 + FullSemantic, + /// 混合策略 + Hybrid { + entity_weight: f32, + semantic_weight: f32, + time_weight: f32, + }, +} +``` + +#### 2.2.2 LLM Intent 分析器实现 + +```rust +// 文件: cortex-mem-core/src/search/intent_analyzer.rs (新建) + +use crate::llm::{LLMClient, Prompts}; +use crate::search::query_intent::*; +use crate::Result; +use std::sync::Arc; +use serde::Deserialize; + +/// Intent 分析器 +pub struct IntentAnalyzer { + llm_client: Arc, + /// 是否启用 LLM 分析(可配置关闭以节省成本) + enabled: bool, + /// 缓存最近的 intent 分析结果 + cache: Arc>>, +} + +impl IntentAnalyzer { + pub fn new(llm_client: Arc) -> Self { + Self { + llm_client, + enabled: true, + cache: Arc::new(tokio::sync::RwLock::new( + lru::LruCache::new(std::num::NonZeroUsize::new(100).unwrap()) + )), + } + } + + /// 分析查询意图 + pub async fn analyze(&self, query: &str) -> Result { + // 1. 检查缓存 + { + let cache = self.cache.read().await; + if let Some(intent) = cache.peek(query) { + return Ok(intent.clone()); + } + } + + // 2. 使用 LLM 进行深度分析 + let intent = if self.enabled { + self.analyze_with_llm(query).await? + } else { + // 回退到规则引擎 + self.analyze_with_rules(query) + }; + + // 3. 缓存结果 + { + let mut cache = self.cache.write().await; + cache.put(query.to_string(), intent.clone()); + } + + Ok(intent) + } + + async fn analyze_with_llm(&self, query: &str) -> Result { + let prompt = Self::build_intent_prompt(query); + + let response = self.llm_client.complete_with_system( + "你是一个专业的查询意图分析器,帮助理解用户在记忆系统中的检索意图。", + &prompt + ).await?; + + // 解析 LLM 响应 + self.parse_llm_response(query, &response) + } + + fn build_intent_prompt(query: &str) -> String { + format!( + r#"分析以下查询,提取关键信息用于记忆检索系统。 + +## 输入查询 +{} + +## 分析任务 +请识别以下信息: + +1. 
**查询类型**:从以下类型中选择最匹配的 + - factual: 事实性问题("谁"、"什么"、"多少") + - entity_lookup: 查找特定实体(人名、项目名等) + - temporal: 有时间约束的查询 + - relational: 查询实体间关系 + - procedural: 操作步骤类问题 + - conceptual: 概念解释类 + - search: 搜索类请求 + - comparative: 比较/决策类 + - general: 通用查询 + +2. **命名实体**:提取人名、组织、项目、技术术语、地点等 + +3. **时间约束**:识别时间表达式并解析 + +4. **关键词**:提取 2-5 个最重要的搜索关键词 + +5. **查询重写**:如果原查询模糊,生成一个更明确的版本 + +6. **检索策略建议**:推荐最合适的检索方法 + +## 输出格式(JSON) +{{ + "query_type": "类型名称", + "entities": {{ + "people": ["人名1"], + "organizations": ["组织名"], + "projects": ["项目名"], + "technologies": ["技术术语"], + "locations": ["地点"], + "other": ["其他实体"] + }}, + "time_constraint": {{ + "expression": "时间原文", + "relative": "相对时间(如 last_week)" + }}, + "keywords": ["关键词1", "关键词2"], + "rewritten_query": "重写后的查询(如果原查询清晰则为 null)", + "retrieval_strategy": {{ + "type": "策略名称", + "params": {{}} + }}, + "confidence": 0.95 +}} + +## 注意事项 +- 中文查询请用中文分析 +- 实体识别要准确,不确定的放入 other +- 时间表达式要尽量解析为具体范围 +- confidence 反映你对分析的确定程度 + +请直接输出 JSON,不要有其他内容。"#, + query + ) + } + + fn parse_llm_response(&self, query: &str, response: &str) -> Result { + // 提取 JSON + let json_str = Self::extract_json(response); + + // 解析 + let parsed: LLMIntentResponse = serde_json::from_str(json_str) + .map_err(|e| crate::Error::Search(format!("Failed to parse intent: {}", e)))?; + + Ok(EnhancedQueryIntent { + original_query: query.to_string(), + rewritten_query: parsed.rewritten_query, + query_type: parsed.query_type, + keywords: parsed.keywords, + entities: parsed.entities, + time_constraint: parsed.time_constraint, + retrieval_strategy: self.parse_strategy(&parsed.retrieval_strategy), + confidence: parsed.confidence, + }) + } + + fn extract_json(response: &str) -> &str { + let trimmed = response.trim(); + if let Some(start) = trimmed.find('{') { + let mut depth = 0; + for (i, c) in trimmed[start..].char_indices() { + match c { + '{' => depth += 1, + '}' => { + depth -= 1; + if depth == 0 { + return &trimmed[start..start + i + 1]; + } + } + _ => {} + } + } + } + 
trimmed + } + + fn parse_strategy(&self, strategy: &StrategyResponse) -> RetrievalStrategy { + match strategy.type_.as_str() { + "entity_first" => RetrievalStrategy::EntityFirst, + "time_filtered" => RetrievalStrategy::TimeFiltered, + "full_semantic" => RetrievalStrategy::FullSemantic, + "hybrid" => { + let params = &strategy.params; + RetrievalStrategy::Hybrid { + entity_weight: params.get("entity_weight").and_then(|v| v.as_f64()).unwrap_or(0.4) as f32, + semantic_weight: params.get("semantic_weight").and_then(|v| v.as_f64()).unwrap_or(0.4) as f32, + time_weight: params.get("time_weight").and_then(|v| v.as_f64()).unwrap_or(0.2) as f32, + } + } + _ => RetrievalStrategy::LayeredCascade, + } + } + + /// 规则引擎回退(当 LLM 不可用时) + fn analyze_with_rules(&self, query: &str) -> EnhancedQueryIntent { + let mut intent = EnhancedQueryIntent { + original_query: query.to_string(), + rewritten_query: None, + query_type: QueryType::General, + keywords: Vec::new(), + entities: ExtractedEntities::default(), + time_constraint: None, + retrieval_strategy: RetrievalStrategy::LayeredCascade, + confidence: 0.5, + }; + + // 简单的规则匹配 + let lower = query.to_lowercase(); + + // 时间关键词检测 + let time_keywords = ["上次", "昨天", "上周", "最近", "last", "yesterday", "week", "recent"]; + if time_keywords.iter().any(|k| lower.contains(k)) { + intent.query_type = QueryType::Temporal; + intent.retrieval_strategy = RetrievalStrategy::TimeFiltered; + } + + // 实体查询检测 + if query.chars().count() <= 6 && !query.contains(' ') && !query.contains('?') { + intent.query_type = QueryType::EntityLookup; + intent.retrieval_strategy = RetrievalStrategy::EntityFirst; + // 假设短查询是实体名 + intent.entities.other.push(query.to_string()); + } + + // 提取关键词 + intent.keywords = query + .split_whitespace() + .filter(|w| w.len() > 1) + .map(|s| s.to_lowercase()) + .take(5) + .collect(); + + intent + } +} + +// LLM 响应结构 +#[derive(Debug, Deserialize)] +struct LLMIntentResponse { + #[serde(rename = "query_type")] + query_type: QueryType, + 
entities: ExtractedEntities, + time_constraint: Option, + keywords: Vec, + rewritten_query: Option, + retrieval_strategy: StrategyResponse, + confidence: f32, +} + +#[derive(Debug, Deserialize)] +struct StrategyResponse { + #[serde(rename = "type")] + type_: String, + params: serde_json::Map, +} +``` + +### 2.3 L0 抽象层实体保留增强 + +#### 2.3.1 改进 L0 生成 Prompt + +```rust +// 文件: cortex-mem-core/src/llm/prompts.rs (修改) + +impl Prompts { + /// Prompt for generating L0 abstract with entity preservation + pub fn abstract_generation_with_entities(content: &str, known_entities: &[String]) -> String { + let entities_hint = if known_entities.is_empty() { + String::new() + } else { + format!( + "\n**已知实体(必须保留)**:{}\n", + known_entities.join(", ") + ) + }; + + format!( + r#"生成一个精炼的摘要(约100个token),用于快速相关性检测。 + +## 要求 +1. **实体优先**:必须包含内容中提到的所有重要实体(人名、项目名、技术术语等) +2. **信息广度**:覆盖多个关键方面,而非深入单一主题 +3. **紧凑表达**:使用"讨论了X、Y和Z"而非冗长描述 +4. **语言一致**:使用与输入内容相同的语言 +{} +## 内容 +{} + +## 摘要(约100 token)"#, + entities_hint, content + ) + } + + /// 从 L1 概览中提取实体列表 + pub fn entity_extraction_from_overview(overview: &str) -> String { + format!( + r#"从以下概览中提取所有命名实体。 + +## 输出格式(JSON) +{{ + "people": ["人名"], + "organizations": ["组织名"], + "projects": ["项目名"], + "technologies": ["技术术语"], + "locations": ["地点"], + "other": ["其他实体"] +}} + +## 概览内容 +{} + +## 实体列表(JSON)"#, + overview + ) + } +} +``` + +#### 2.3.2 层级生成器改进 + +```rust +// 文件: cortex-mem-core/src/layers/generator.rs (修改) + +impl AbstractGenerator { + /// 生成 L0 抽象(带实体保留) + pub async fn generate_with_entities( + &self, + content: &str, + llm: &Arc, + existing_entities: Option<&[String]>, + ) -> Result { + info!("Generating L0 Abstract with entity preservation"); + + // 1. 如果提供了已有实体,使用增强 prompt + let prompt = if let Some(entities) = existing_entities { + Prompts::abstract_generation_with_entities(content, entities) + } else { + Prompts::abstract_generation(content) + }; + + // 2. 
调用 LLM + let abstract_text = llm.complete_with_system( + "你是专业的摘要生成器,擅长在有限篇幅内保留关键实体信息。", + &prompt + ).await?; + + // 3. 从摘要中提取实体(用于后续索引) + let entities = self.extract_entities_from_abstract(&abstract_text, llm).await?; + + Ok(AbstractResult { + content: abstract_text, + entities, + token_count: Self::estimate_tokens(&abstract_text), + }) + } + + async fn extract_entities_from_abstract( + &self, + abstract_text: &str, + llm: &Arc, + ) -> Result> { + let prompt = format!( + r#"从以下摘要中提取所有命名实体(人名、组织、项目、技术术语等)。 + +摘要: +{} + +输出格式(JSON数组): +["实体1", "实体2", ...]"#, + abstract_text + ); + + let response = llm.complete(&prompt).await?; + + // 解析 JSON 数组 + let json_str = response.trim() + .trim_start_matches('[') + .trim_end_matches(']'); + + if json_str.is_empty() { + return Ok(Vec::new()); + } + + serde_json::from_str(&format!("[{}]", json_str)) + .map_err(|e| crate::Error::Llm(format!("Failed to parse entities: {}", e))) + } +} + +#[derive(Debug, Clone)] +pub struct AbstractResult { + pub content: String, + pub entities: Vec, + pub token_count: usize, +} +``` + +### 2.4 实体感知检索策略 + +#### 2.4.1 混合检索执行器 + +```rust +// 文件: cortex-mem-core/src/search/retrieval_executor.rs (新建) + +use crate::search::query_intent::*; +use crate::search::{VectorSearchEngine, SearchOptions, SearchResult}; +use crate::vector_store::QdrantVectorStore; +use crate::Result; +use std::sync::Arc; +use tracing::{info, debug}; + +/// 检索执行器 +pub struct RetrievalExecutor { + vector_engine: Arc, + vector_store: Arc, +} + +impl RetrievalExecutor { + pub fn new( + vector_engine: Arc, + vector_store: Arc, + ) -> Self { + Self { + vector_engine, + vector_store, + } + } + + /// 根据策略执行检索 + pub async fn execute( + &self, + query: &str, + intent: &EnhancedQueryIntent, + options: &SearchOptions, + ) -> Result> { + info!("Executing retrieval with strategy: {:?}", intent.retrieval_strategy); + + match &intent.retrieval_strategy { + RetrievalStrategy::EntityFirst => { + self.entity_first_search(query, intent, 
options).await + } + RetrievalStrategy::TimeFiltered => { + self.time_filtered_search(query, intent, options).await + } + RetrievalStrategy::FullSemantic => { + self.full_semantic_search(query, intent, options).await + } + RetrievalStrategy::Hybrid { entity_weight, semantic_weight, time_weight } => { + self.hybrid_search(query, intent, options, *entity_weight, *semantic_weight, *time_weight).await + } + RetrievalStrategy::LayeredCascade => { + // 默认三层级联检索 + self.vector_engine.layered_semantic_search(query, options).await + } + } + } + + /// 实体优先检索 + async fn entity_first_search( + &self, + query: &str, + intent: &EnhancedQueryIntent, + options: &SearchOptions, + ) -> Result> { + let all_entities = intent.entities.all_entities(); + + if all_entities.is_empty() { + // 没有实体,回退到语义搜索 + return self.vector_engine.semantic_search(query, options).await; + } + + debug!("Entity-first search for entities: {:?}", all_entities); + + // 1. 先用实体过滤检索 + let entity_filters = self.build_entity_filters(&intent.entities); + let entity_results = self.vector_store + .search_by_entities(&entity_filters, options.limit * 2) + .await?; + + // 2. 再用语义相似度排序 + let query_embedding = self.vector_engine.get_embedding(query).await?; + + let mut scored_results: Vec<_> = entity_results + .into_iter() + .map(|mem| { + let semantic_score = cosine_similarity(&query_embedding, &mem.embedding); + let entity_match_score = self.calculate_entity_match_score(&mem, &intent.entities); + + // 综合评分:实体匹配 40% + 语义相似 60% + let combined = entity_match_score * 0.4 + semantic_score * 0.6; + + SearchResult { + uri: mem.metadata.uri.unwrap_or(mem.id), + score: combined, + snippet: extract_snippet(&mem.content, query), + content: Some(mem.content), + } + }) + .collect(); + + // 3. 
排序并返回 + scored_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); + scored_results.truncate(options.limit); + + Ok(scored_results) + } + + /// 时间过滤检索 + async fn time_filtered_search( + &self, + query: &str, + intent: &EnhancedQueryIntent, + options: &SearchOptions, + ) -> Result> { + let time_filter = match &intent.time_constraint { + Some(tc) => tc, + None => return self.vector_engine.layered_semantic_search(query, options).await, + }; + + debug!("Time-filtered search for: {:?}", time_filter); + + // 构建时间范围过滤器 + let mut filters = crate::types::Filters::default(); + + if let Some(start) = time_filter.start { + filters.created_after = Some(start); + } + if let Some(end) = time_filter.end { + filters.created_before = Some(end); + } + + // 执行带时间过滤的搜索 + self.vector_engine.filtered_semantic_search(query, &filters, options).await + } + + /// 混合检索 + async fn hybrid_search( + &self, + query: &str, + intent: &EnhancedQueryIntent, + options: &SearchOptions, + entity_weight: f32, + semantic_weight: f32, + time_weight: f32, + ) -> Result> { + let mut candidates: std::collections::HashMap = + std::collections::HashMap::new(); + + // 1. 实体检索分支 + if entity_weight > 0.0 && !intent.entities.all_entities().is_empty() { + let entity_results = self.entity_first_search(query, intent, &SearchOptions { + limit: options.limit * 2, + ..options.clone() + }).await?; + + for result in entity_results { + let entry = candidates.entry(result.uri.clone()).or_default(); + entry.uri = result.uri.clone(); + entry.entity_score = Some(result.score); + entry.content = result.content.clone(); + } + } + + // 2. 
语义检索分支 + if semantic_weight > 0.0 { + let semantic_results = self.vector_engine.layered_semantic_search(query, &SearchOptions { + limit: options.limit * 2, + ..options.clone() + }).await?; + + for result in semantic_results { + let entry = candidates.entry(result.uri.clone()).or_default(); + entry.uri = result.uri.clone(); + entry.semantic_score = Some(result.score); + entry.content = result.content.clone(); + } + } + + // 3. 时间过滤分支 + if time_weight > 0.0 && intent.time_constraint.is_some() { + let time_results = self.time_filtered_search(query, intent, &SearchOptions { + limit: options.limit * 2, + ..options.clone() + }).await?; + + for result in time_results { + let entry = candidates.entry(result.uri.clone()).or_default(); + entry.uri = result.uri.clone(); + entry.time_score = Some(result.score); + entry.content = result.content.clone(); + } + } + + // 4. 综合评分 + let mut final_results: Vec<_> = candidates + .into_values() + .map(|c| { + let entity = c.entity_score.unwrap_or(0.0) * entity_weight; + let semantic = c.semantic_score.unwrap_or(0.0) * semantic_weight; + let time = c.time_score.unwrap_or(0.0) * time_weight; + let total = entity + semantic + time; + + SearchResult { + uri: c.uri, + score: total, + snippet: String::new(), + content: c.content, + } + }) + .filter(|r| r.score >= options.threshold) + .collect(); + + final_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); + final_results.truncate(options.limit); + + Ok(final_results) + } + + fn build_entity_filters(&self, entities: &ExtractedEntities) -> EntityFilters { + EntityFilters { + people: entities.people.clone(), + organizations: entities.organizations.clone(), + projects: entities.projects.clone(), + technologies: entities.technologies.clone(), + } + } + + fn calculate_entity_match_score(&self, memory: &crate::types::Memory, entities: &ExtractedEntities) -> f32 { + let metadata_entities = &memory.metadata.entities; + let query_entities = entities.all_entities(); + + if 
query_entities.is_empty() { + return 0.5; + } + + let matches = query_entities + .iter() + .filter(|qe| metadata_entities.iter().any(|me| me.to_lowercase() == qe.to_lowercase())) + .count(); + + matches as f32 / query_entities.len() as f32 + } +} + +#[derive(Default)] +struct HybridCandidate { + uri: String, + entity_score: Option, + semantic_score: Option, + time_score: Option, + content: Option, +} + +fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 { + let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum(); + let mag_a: f32 = a.iter().map(|x| x * x).sum::().sqrt(); + let mag_b: f32 = b.iter().map(|x| x * x).sum::().sqrt(); + if mag_a == 0.0 || mag_b == 0.0 { 0.0 } else { dot / (mag_a * mag_b) } +} + +fn extract_snippet(content: &str, query: &str) -> String { + // 复用现有的 snippet 提取逻辑 + if content.chars().count() > 200 { + format!("{}...", content.chars().take(200).collect::()) + } else { + content.to_string() + } +} +``` + +### 2.5 集成到现有搜索引擎 + +```rust +// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) + +impl VectorSearchEngine { + /// 智能检索入口(使用 Intent 分析) + pub async fn smart_search( + &self, + query: &str, + options: &SearchOptions, + intent_analyzer: Option<&IntentAnalyzer>, + ) -> Result> { + // 1. Intent 分析 + let intent = if let Some(analyzer) = intent_analyzer { + analyzer.analyze(query).await? + } else { + // 回退到旧的简单分析 + self.simple_intent_analysis(query).await + }; + + info!( + "Smart search intent: type={:?}, strategy={:?}, entities={}", + intent.query_type, + intent.retrieval_strategy, + intent.entities.all_entities().len() + ); + + // 2. 根据策略选择检索路径 + let search_query = intent.rewritten_query.as_deref().unwrap_or(query); + + // 3. 
执行检索 + match &intent.retrieval_strategy { + RetrievalStrategy::EntityFirst => { + // 使用实体优先检索 + self.entity_aware_search(search_query, &intent, options).await + } + _ => { + // 使用增强的三层级联检索 + self.enhanced_layered_search(search_query, &intent, options).await + } + } + } + + /// 实体感知的层级检索 + async fn enhanced_layered_search( + &self, + query: &str, + intent: &EnhancedQueryIntent, + options: &SearchOptions, + ) -> Result> { + // 调用现有的 layered_semantic_search,但使用 intent 中的信息 + let mut enhanced_options = options.clone(); + + // 根据意图类型调整阈值 + enhanced_options.threshold = match intent.query_type { + QueryType::EntityLookup => 0.3, // 实体查询降低阈值 + QueryType::Temporal => 0.35, + _ => options.threshold, + }; + + self.layered_semantic_search(query, &enhanced_options).await + } +} +``` + +## 3. 配置支持 + +```toml +# config.toml 新增配置 + +[search] +# Intent 分析模式 +intent_analysis_mode = "llm" # llm, rules, hybrid + +# Intent 分析缓存 +intent_cache_size = 100 + +# 实体检索权重 +[search.entity_retrieval] +enabled = true +entity_match_weight = 0.4 +semantic_weight = 0.6 + +# L0 实体保留 +[search.l0_entity_preservation] +enabled = true +min_entities = 3 +``` + +## 4. 实现计划 + +| 步骤 | 任务 | 依赖 | +|------|------|------| +| 1 | 实现 EnhancedQueryIntent 结构 | 无 | +| 2 | 实现 IntentAnalyzer(含 LLM 分析) | 步骤1 | +| 3 | 修改 L0 生成器,添加实体保留 | 无 | +| 4 | 实现 RetrievalExecutor | 步骤1, 2 | +| 5 | 集成到 VectorSearchEngine | 步骤2, 4 | +| 6 | 添加配置支持 | 全部 | + +## 5. 预期收益 + +| 场景 | 当前表现 | 优化后 | 提升 | +|------|----------|--------|------| +| "王明是谁" | 召回失败或低分 | 高召回率 | +60%+ | +| "上周讨论的技术方案" | 归类错误 | 准确识别+时间过滤 | +40% | +| 中文意图识别 | ~30% 准确 | ~85% 准确 | +55pp | +| LLM 开销 | 无 | 每次查询 1 次 | 可控 | + +## 6. 技术债务清理 + +1. **删除** `detect_intent_type` 方法中的硬编码关键词匹配 +2. **启用** 已存在但未使用的 `Prompts::intent_analysis` +3. **统一** Intent 分析逻辑,移除重复代码 +4. 
**添加** 完善的单元测试和集成测试 diff --git a/docs_plan_v2.6/3_memory_forgetting_mechanism.md b/docs_plan_v2.6/3_memory_forgetting_mechanism.md new file mode 100644 index 0000000..cde500c --- /dev/null +++ b/docs_plan_v2.6/3_memory_forgetting_mechanism.md @@ -0,0 +1,863 @@ +# 记忆遗忘机制设计技术方案 + +## 1. 问题分析 + +### 1.1 现有元数据未被充分利用 + +**位置**: `cortex-mem-core/src/memory_index.rs` + +当前 `MemoryMetadata` 已定义了访问统计字段,但未实现基于此的衰减或遗忘逻辑: + +```rust +pub struct MemoryMetadata { + pub last_accessed: DateTime, // 有字段 + pub access_count: u32, // 有字段 + pub confidence: f32, // 有字段 + // 缺少: + // - importance_decay (重要性衰减) + // - ttl (存活时间) + // - last_updated (内容更新时间) + // - memory_phase (记忆阶段) +} +``` + +### 1.2 长期运行的问题 + +对于持续数月的 Coding Agent(如 Cursor、Claude Code),会积累大量记忆: + +| 时间跨度 | 预估记忆量 | 问题 | +|----------|------------|------| +| 1 个月 | ~500 条 | 检索效率下降 | +| 3 个月 | ~1500 条 | 召回精度下降,噪声增加 | +| 6 个月 | ~3000 条 | 存储膨胀,过时信息干扰 | +| 1 年 | ~6000 条 | 系统性能显著下降 | + +### 1.3 过时信息的危害 + +1. **召回噪声**:已废弃的项目配置、过时的技术决策干扰当前查询 +2. **存储浪费**:无价值信息占用向量索引空间 +3. **上下文污染**:Agent 可能基于过时信息做出错误决策 + +## 2. 理论基础 + +### 2.1 艾宾浩斯遗忘曲线 + +记忆强度随时间自然衰减,呈现指数曲线: + +``` +记忆强度 = 初始强度 × e^(-t/τ) + +其中: +- t: 距上次访问的时间 +- τ: 时间常数(半衰期) +``` + +### 2.2 记忆巩固理论 + +频繁访问的记忆会被"巩固",衰减速度减慢: + +``` +衰减速度 ∝ 1 / log(1 + access_count) +``` + +### 2.3 工作记忆与长期记忆 + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 记忆生命周期模型 │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ 新记忆 ──► 工作记忆 ──► 短期记忆 ──► 长期记忆 ──► 归档 │ +│ │ │ │ │ │ │ +│ │ │ │ │ │ │ +│ │ 高频访问 中频访问 低频访问 极少访问 │ +│ │ 快速衰减 中速衰减 慢速衰减 仅查询可见 │ +│ │ TTL: 7天 TTL: 30天 TTL: 90天 永久保留 │ +│ │ │ │ │ │ │ +│ └──────────┴────────────┴────────────┴────────────┘ │ +│ │ │ +│ ▼ │ +│ 自动遗忘清理 │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +## 3. 
解决方案 + +### 3.1 增强的记忆元数据 + +```rust +// 文件: cortex-mem-core/src/memory_index.rs (修改) + +use serde::{Deserialize, Serialize}; +use chrono::{DateTime, Utc}; + +/// 记忆阶段 +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum MemoryPhase { + /// 工作记忆:当前会话频繁使用 + Working, + /// 短期记忆:近期使用,可能过渡到长期 + ShortTerm, + /// 长期记忆:已巩固,衰减慢 + LongTerm, + /// 归档记忆:极少访问,仅特定查询可见 + Archived, +} + +impl Default for MemoryPhase { + fn default() -> Self { + MemoryPhase::Working + } +} + +/// 遗忘策略配置 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ForgettingConfig { + /// 工作记忆半衰期(天) + pub working_half_life: f32, + /// 短期记忆半衰期(天) + pub short_term_half_life: f32, + /// 长期记忆半衰期(天) + pub long_term_half_life: f32, + /// 巩固阈值:访问次数达到此值后进入下一阶段 + pub consolidation_threshold: u32, + /// 遗忘阈值:强度低于此值的记忆将被清理 + pub forgetting_threshold: f32, + /// 归档阈值:强度低于此值进入归档 + pub archive_threshold: f32, + /// 是否启用自动清理 + pub auto_cleanup_enabled: bool, + /// 清理周期(小时) + pub cleanup_interval_hours: u64, +} + +impl Default for ForgettingConfig { + fn default() -> Self { + Self { + working_half_life: 7.0, // 7 天半衰期 + short_term_half_life: 30.0, // 30 天半衰期 + long_term_half_life: 180.0, // 180 天半衰期 + consolidation_threshold: 5, // 访问 5 次后巩固 + forgetting_threshold: 0.1, // 强度低于 0.1 删除 + archive_threshold: 0.3, // 强度低于 0.3 归档 + auto_cleanup_enabled: true, + cleanup_interval_hours: 24, + } + } +} + +/// 增强的记忆元数据 +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EnhancedMemoryMetadata { + // === 原有字段 === + pub id: String, + pub file: String, + pub memory_type: MemoryType, + pub key: String, + pub content_hash: String, + pub source_sessions: Vec, + pub created_at: DateTime, + pub updated_at: DateTime, + pub last_accessed: DateTime, + pub access_count: u32, + pub confidence: f32, + pub content_summary: String, + + // === 新增字段 === + /// 记忆阶段 + pub phase: MemoryPhase, + /// 重要性评分(用户可手动调整) + pub importance: f32, + /// 衰减后的当前强度 + pub current_strength: f32, + /// 最后强度计算时间 + pub 
strength_updated_at: DateTime<Utc>,
+    /// 是否标记为重要(永不遗忘)
+    pub is_pinned: bool,
+    /// 自定义 TTL(秒),None 表示使用默认
+    pub custom_ttl: Option<u64>,
+    /// 关联的其他记忆 ID
+    pub related_memories: Vec<String>,
+    /// 标签(用于分类遗忘)
+    pub tags: Vec<String>,
+    /// 内容过期时间(用于时序性信息)
+    pub expires_at: Option<DateTime<Utc>>,
+}
+
+impl EnhancedMemoryMetadata {
+    /// 创建新记忆
+    pub fn new(
+        id: String,
+        file: String,
+        memory_type: MemoryType,
+        key: String,
+        content_hash: String,
+        source_session: &str,
+        confidence: f32,
+        content_summary: String,
+    ) -> Self {
+        let now = Utc::now();
+        Self {
+            id,
+            file,
+            memory_type,
+            key,
+            content_hash,
+            source_sessions: vec![source_session.to_string()],
+            created_at: now,
+            updated_at: now,
+            last_accessed: now,
+            access_count: 0,
+            confidence,
+            content_summary,
+
+            // 新字段默认值
+            phase: MemoryPhase::Working,
+            importance: 0.5,
+            current_strength: 1.0,
+            strength_updated_at: now,
+            is_pinned: false,
+            custom_ttl: None,
+            related_memories: Vec::new(),
+            tags: Vec::new(),
+            expires_at: None,
+        }
+    }
+
+    /// 从旧元数据迁移
+    pub fn from_legacy(legacy: MemoryMetadata) -> Self {
+        Self {
+            id: legacy.id,
+            file: legacy.file,
+            memory_type: legacy.memory_type,
+            key: legacy.key,
+            content_hash: legacy.content_hash,
+            source_sessions: legacy.source_sessions,
+            created_at: legacy.created_at,
+            updated_at: legacy.updated_at,
+            last_accessed: legacy.last_accessed,
+            access_count: legacy.access_count,
+            confidence: legacy.confidence,
+            content_summary: legacy.content_summary,
+
+            phase: Self::infer_phase_from_access(legacy.access_count),
+            importance: legacy.confidence,
+            current_strength: 1.0,
+            strength_updated_at: Utc::now(),
+            is_pinned: false,
+            custom_ttl: None,
+            related_memories: Vec::new(),
+            tags: Vec::new(),
+            expires_at: None,
+        }
+    }
+
+    fn infer_phase_from_access(access_count: u32) -> MemoryPhase {
+        match access_count {
+            0..=2 => MemoryPhase::Working,
+            3..=10 => MemoryPhase::ShortTerm,
+            _ => MemoryPhase::LongTerm,
+        }
+    }
+}
+```
+
+### 3.2 遗忘计算引擎
+
+```rust
+// 文件: 
cortex-mem-core/src/forgetting/engine.rs (新建) + +use crate::memory_index::{EnhancedMemoryMetadata, MemoryPhase, ForgettingConfig}; +use chrono::{DateTime, Utc}; +use std::sync::Arc; + +/// 遗忘计算引擎 +pub struct ForgettingEngine { + config: ForgettingConfig, +} + +impl ForgettingEngine { + pub fn new(config: ForgettingConfig) -> Self { + Self { config } + } + + /// 计算记忆的当前强度 + /// + /// 强度 = 基础强度 × 衰减因子 × 巩固加成 + pub fn calculate_strength(&self, memory: &EnhancedMemoryMetadata) -> f32 { + if memory.is_pinned { + return 1.0; // 固定的记忆永不衰减 + } + + // 1. 计算时间衰减 + let days_since_access = (Utc::now() - memory.last_accessed).num_days() as f32; + let half_life = self.get_half_life(memory.phase); + let decay_factor = (-days_since_access / half_life).exp(); + + // 2. 计算巩固加成(访问次数越多,衰减越慢) + let consolidation_bonus = 1.0 + (memory.access_count as f32).ln().max(0.0) * 0.1; + + // 3. 计算重要性加成 + let importance_factor = memory.importance * 0.5 + 0.5; // 0.5 ~ 1.0 + + // 4. 综合强度 + let strength = memory.confidence * decay_factor * consolidation_bonus * importance_factor; + + // 5. 
检查是否过期 + if let Some(expires_at) = memory.expires_at { + if Utc::now() > expires_at { + return 0.0; // 已过期 + } + } + + strength.clamp(0.0, 1.0) + } + + /// 获取对应阶段的半衰期 + fn get_half_life(&self, phase: MemoryPhase) -> f32 { + match phase { + MemoryPhase::Working => self.config.working_half_life, + MemoryPhase::ShortTerm => self.config.short_term_half_life, + MemoryPhase::LongTerm => self.config.long_term_half_life, + MemoryPhase::Archived => self.config.long_term_half_life * 2.0, + } + } + + /// 判断记忆是否应该被遗忘 + pub fn should_forget(&self, memory: &EnhancedMemoryMetadata) -> bool { + if memory.is_pinned { + return false; + } + + let strength = self.calculate_strength(memory); + strength < self.config.forgetting_threshold + } + + /// 判断记忆是否应该归档 + pub fn should_archive(&self, memory: &EnhancedMemoryMetadata) -> bool { + if memory.is_pinned || memory.phase == MemoryPhase::Archived { + return false; + } + + let strength = self.calculate_strength(memory); + strength < self.config.archive_threshold && strength >= self.config.forgetting_threshold + } + + /// 判断记忆是否应该巩固(提升阶段) + pub fn should_consolidate(&self, memory: &EnhancedMemoryMetadata) -> Option { + if memory.access_count >= self.config.consolidation_threshold { + match memory.phase { + MemoryPhase::Working => Some(MemoryPhase::ShortTerm), + MemoryPhase::ShortTerm => Some(MemoryPhase::LongTerm), + MemoryPhase::LongTerm | MemoryPhase::Archived => None, + } + } else { + None + } + } + + /// 更新记忆的强度和阶段 + pub fn update_memory_state(&self, memory: &mut EnhancedMemoryMetadata) -> MemoryStateChange { + let mut changes = MemoryStateChange::default(); + + // 计算新强度 + let new_strength = self.calculate_strength(memory); + let old_strength = memory.current_strength; + memory.current_strength = new_strength; + memory.strength_updated_at = Utc::now(); + + if (new_strength - old_strength).abs() > 0.01 { + changes.strength_changed = true; + } + + // 检查巩固 + if let Some(new_phase) = self.should_consolidate(memory) { + memory.phase = 
new_phase; + changes.phase_changed = true; + changes.new_phase = Some(new_phase); + } + + // 检查归档 + if self.should_archive(memory) { + memory.phase = MemoryPhase::Archived; + changes.phase_changed = true; + changes.new_phase = Some(MemoryPhase::Archived); + changes.archived = true; + } + + // 检查遗忘 + changes.should_forget = self.should_forget(memory); + + changes + } +} + +#[derive(Debug, Clone, Default)] +pub struct MemoryStateChange { + pub strength_changed: bool, + pub phase_changed: bool, + pub new_phase: Option, + pub archived: bool, + pub should_forget: bool, +} + +/// 遗忘操作类型 +#[derive(Debug, Clone)] +pub enum ForgettingAction { + /// 删除记忆 + Delete { memory_id: String }, + /// 归档记忆 + Archive { memory_id: String }, + /// 巩固记忆(提升阶段) + Consolidate { memory_id: String, new_phase: MemoryPhase }, + /// 更新强度 + UpdateStrength { memory_id: String, new_strength: f32 }, + /// 无操作 + None, +} +``` + +### 3.3 记忆清理服务 + +```rust +// 文件: cortex-mem-core/src/forgetting/cleanup_service.rs (新建) + +use crate::forgetting::{ForgettingEngine, ForgettingAction, ForgettingConfig}; +use crate::memory_index_manager::MemoryIndexManager; +use crate::memory_events::{MemoryEvent, ChangeType, DeleteReason}; +use crate::vector_store::QdrantVectorStore; +use crate::filesystem::{CortexFilesystem, FilesystemOperations}; +use crate::Result; +use std::sync::Arc; +use tokio::sync::mpsc; +use tracing::{info, warn, debug}; + +/// 记忆清理服务 +pub struct MemoryCleanupService { + engine: ForgettingEngine, + index_manager: Arc, + vector_store: Arc, + filesystem: Arc, + event_tx: mpsc::UnboundedSender, + config: ForgettingConfig, +} + +/// 清理统计 +#[derive(Debug, Clone, Default)] +pub struct CleanupStats { + pub total_scanned: usize, + pub deleted: usize, + pub archived: usize, + pub consolidated: usize, + pub strength_updated: usize, + pub errors: usize, +} + +impl MemoryCleanupService { + pub fn new( + config: ForgettingConfig, + index_manager: Arc, + vector_store: Arc, + filesystem: Arc, + event_tx: 
mpsc::UnboundedSender, + ) -> Self { + Self { + engine: ForgettingEngine::new(config.clone()), + index_manager, + vector_store, + filesystem, + event_tx, + config, + } + } + + /// 启动定期清理任务 + pub fn start_background_cleanup(self: Arc) -> tokio::task::JoinHandle<()> { + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(self.config.cleanup_interval_hours * 3600); + + loop { + tokio::time::sleep(interval).await; + + if !self.config.auto_cleanup_enabled { + continue; + } + + info!("Starting scheduled memory cleanup"); + + match self.run_cleanup().await { + Ok(stats) => { + info!( + "Cleanup completed: {} scanned, {} deleted, {} archived, {} consolidated", + stats.total_scanned, stats.deleted, stats.archived, stats.consolidated + ); + } + Err(e) => { + warn!("Cleanup failed: {}", e); + } + } + } + }) + } + + /// 执行一次清理 + pub async fn run_cleanup(&self) -> Result { + let mut stats = CleanupStats::default(); + + // 1. 扫描所有维度的记忆 + let scopes = [ + (crate::memory_index::MemoryScope::User, "user"), + (crate::memory_index::MemoryScope::Agent, "agent"), + ]; + + for (scope, owner_prefix) in scopes { + let owners = self.index_manager.list_owners(&scope).await?; + + for owner_id in owners { + let owner_stats = self.cleanup_owner_memories(&scope, &owner_id).await?; + stats.total_scanned += owner_stats.total_scanned; + stats.deleted += owner_stats.deleted; + stats.archived += owner_stats.archived; + stats.consolidated += owner_stats.consolidated; + stats.strength_updated += owner_stats.strength_updated; + stats.errors += owner_stats.errors; + } + } + + Ok(stats) + } + + /// 清理特定所有者的记忆 + async fn cleanup_owner_memories( + &self, + scope: &crate::memory_index::MemoryScope, + owner_id: &str, + ) -> Result { + let mut stats = CleanupStats::default(); + + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + let memory_ids: Vec = index.memories.keys().cloned().collect(); + stats.total_scanned = 
memory_ids.len(); + + let mut to_delete = Vec::new(); + let mut to_archive = Vec::new(); + let mut to_consolidate = Vec::new(); + + for memory_id in &memory_ids { + if let Some(memory) = index.memories.get_mut(memory_id) { + let change = self.engine.update_memory_state(memory); + + if change.should_forget { + to_delete.push(memory_id.clone()); + } else if change.archived { + to_archive.push(memory_id.clone()); + } else if let Some(new_phase) = change.new_phase { + to_consolidate.push((memory_id.clone(), new_phase)); + } + + if change.strength_changed { + stats.strength_updated += 1; + } + } + } + + // 执行删除 + for memory_id in &to_delete { + if let Some(memory) = index.memories.remove(memory_id) { + self.delete_memory_files(&memory.file).await?; + self.emit_delete_event(memory_id, &memory.file).await; + stats.deleted += 1; + } + } + + // 归档已在 update_memory_state 中更新了 phase + stats.archived = to_archive.len(); + + // 巩固计数 + stats.consolidated = to_consolidate.len(); + + // 保存更新后的索引 + self.index_manager.save_index(scope, owner_id, &index).await?; + + Ok(stats) + } + + async fn delete_memory_files(&self, file_path: &str) -> Result<()> { + // 从文件系统删除 + let uri = format!("cortex://user/{}", file_path); + if self.filesystem.exists(&uri).await? 
{ + self.filesystem.delete(&uri).await?; + } + + // 从向量存储删除 + self.vector_store.delete_by_uri(&uri).await?; + + Ok(()) + } + + async fn emit_delete_event(&self, memory_id: &str, file_path: &str) { + let event = MemoryEvent::Deleted { + memory_id: memory_id.to_string(), + uri: format!("cortex://user/{}", file_path), + reason: DeleteReason::Expired, + timestamp: chrono::Utc::now(), + }; + + let _ = self.event_tx.send(event); + } +} + +/// 手动遗忘操作 +impl MemoryCleanupService { + /// 标记记忆为重要(永不遗忘) + pub async fn pin_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + if let Some(memory) = index.memories.get_mut(memory_id) { + memory.is_pinned = true; + memory.current_strength = 1.0; + } + + self.index_manager.save_index(scope, owner_id, &index).await?; + Ok(()) + } + + /// 取消固定 + pub async fn unpin_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + if let Some(memory) = index.memories.get_mut(memory_id) { + memory.is_pinned = false; + } + + self.index_manager.save_index(scope, owner_id, &index).await?; + Ok(()) + } + + /// 手动删除记忆 + pub async fn delete_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + if let Some(memory) = index.memories.remove(memory_id) { + self.delete_memory_files(&memory.file).await?; + self.emit_delete_event(memory_id, &memory.file).await; + } + + self.index_manager.save_index(scope, owner_id, &index).await?; + Ok(()) + } + + /// 设置记忆过期时间 + pub async fn set_memory_expiry( + &self, + scope: &crate::memory_index::MemoryScope, + owner_id: 
&str, + memory_id: &str, + expires_at: Option>, + ) -> Result<()> { + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + if let Some(memory) = index.memories.get_mut(memory_id) { + memory.expires_at = expires_at; + } + + self.index_manager.save_index(scope, owner_id, &index).await?; + Ok(()) + } + + /// 手动调整记忆重要性 + pub async fn set_memory_importance( + &self, + scope: &crate::memory_index::MemoryScope, + owner_id: &str, + memory_id: &str, + importance: f32, + ) -> Result<()> { + let index = self.index_manager.get_or_create_index(scope, owner_id).await?; + let mut index = index.write().await; + + if let Some(memory) = index.memories.get_mut(memory_id) { + memory.importance = importance.clamp(0.0, 1.0); + } + + self.index_manager.save_index(scope, owner_id, &index).await?; + Ok(()) + } +} +``` + +### 3.4 检索时考虑记忆强度 + +```rust +// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) + +impl VectorSearchEngine { + /// 在检索结果中加入记忆强度因子 + async fn apply_forgetting_factor(&self, results: &mut [SearchResult]) { + for result in results.iter_mut() { + // 从 metadata 获取记忆强度 + if let Some(strength) = self.get_memory_strength(&result.uri).await { + // 调整分数:原始分数 × 强度因子 + // 使用幂函数使高强度记忆保持高分,低强度记忆显著降权 + let strength_factor = strength.powf(0.5); // 平方根减缓衰减 + result.score *= strength_factor; + } + } + } + + async fn get_memory_strength(&self, uri: &str) -> Option { + // 从索引中获取记忆强度 + // 实现取决于具体的索引查询方式 + None + } +} +``` + +### 3.5 配置支持 + +```toml +# config.toml 新增配置 + +[forgetting] +# 是否启用遗忘机制 +enabled = true + +# 是否启用自动清理 +auto_cleanup = true + +# 清理周期(小时) +cleanup_interval_hours = 24 + +# 半衰期配置(天) +working_half_life = 7 +short_term_half_life = 30 +long_term_half_life = 180 + +# 巩固阈值:访问次数达到此值后进入下一阶段 +consolidation_threshold = 5 + +# 遗忘阈值:强度低于此值的记忆将被清理 +forgetting_threshold = 0.1 + +# 归档阈值:强度低于此值进入归档 +archive_threshold = 0.3 + +# 归档记忆是否参与检索 +include_archived_in_search = false +``` + +### 3.6 CLI 命令扩展 + +```rust +// 
cortex-mem-cli 新增命令 + +// 记忆管理命令 +cortex-mem-cli memory pin // 标记为重要 +cortex-mem-cli memory unpin // 取消标记 +cortex-mem-cli memory expire --days 30 // 设置过期时间 +cortex-mem-cli memory importance --value 0.9 // 设置重要性 + +// 遗忘管理命令 +cortex-mem-cli forgetting status // 查看遗忘状态统计 +cortex-mem-cli forgetting preview // 预览将要被清理的记忆 +cortex-mem-cli forgetting run // 手动触发清理 +cortex-mem-cli forgetting config // 查看当前配置 +``` + +## 4. 数据迁移方案 + +### 4.1 从现有元数据迁移 + +```rust +// 文件: cortex-mem-core/src/migration/memory_metadata.rs (新建) + +/// 迁移现有的 MemoryMetadata 到 EnhancedMemoryMetadata +pub async fn migrate_memory_indices(data_dir: &Path) -> Result { + let mut stats = MigrationStats::default(); + + // 扫描所有 .memory_index.json 文件 + for entry in walkdir::WalkDir::new(data_dir) + .into_iter() + .filter_map(|e| e.ok()) + .filter(|e| e.file_name() == ".memory_index.json") + { + let path = entry.path(); + match migrate_single_index(path).await { + Ok(s) => stats.merge(s), + Err(e) => { + warn!("Failed to migrate {}: {}", path.display(), e); + stats.errors += 1; + } + } + } + + Ok(stats) +} + +async fn migrate_single_index(path: &Path) -> Result { + // 1. 读取旧格式 + let content = tokio::fs::read_to_string(path).await?; + let old_index: MemoryIndex = serde_json::from_str(&content)?; + + // 2. 转换为新格式 + let new_index = EnhancedMemoryIndex { + version: 2, + scope: old_index.scope, + owner_id: old_index.owner_id, + last_updated: Utc::now(), + memories: old_index.memories + .into_iter() + .map(|(k, v)| (k, EnhancedMemoryMetadata::from_legacy(v))) + .collect(), + session_summaries: old_index.session_summaries, + }; + + // 3. 备份旧文件 + let backup_path = path.with_extension("json.bak"); + tokio::fs::copy(path, &backup_path).await?; + + // 4. 写入新格式 + let new_content = serde_json::to_string_pretty(&new_index)?; + tokio::fs::write(path, new_content).await?; + + Ok(MigrationStats { + migrated: new_index.memories.len(), + errors: 0, + }) +} +``` + +## 5. 
实现计划 + +| 步骤 | 任务 | 依赖 | +|------|------|------| +| 1 | 定义 EnhancedMemoryMetadata 和 ForgettingConfig | 无 | +| 2 | 实现 ForgettingEngine 核心算法 | 步骤1 | +| 3 | 实现 MemoryCleanupService | 步骤2 | +| 4 | 修改检索逻辑,考虑记忆强度 | 步骤1 | +| 5 | 添加 CLI 命令 | 步骤3 | +| 6 | 实现数据迁移工具 | 步骤1 | +| 7 | 添加配置支持 | 全部 | + +## 6. 预期收益 + +| 指标 | 优化前 | 优化后 | 提升 | +|------|--------|--------|------| +| 6个月记忆量 | ~3000 条 | ~1500 条 | 50% 减少 | +| 召回噪声率 | ~25% | ~8% | 17pp 下降 | +| 存储空间 | 100% | 60% | 40% 节省 | +| 检索延迟 | 100% | 70% | 30% 提升 | + +## 7. 注意事项 + +1. **默认保守**:首次部署时遗忘阈值设置较高,避免误删重要记忆 +2. **用户可控**:提供 pin/unpin 功能让用户手动保护重要记忆 +3. **审计日志**:记录所有遗忘操作,支持审计和恢复 +4. **灰度发布**:先在测试环境验证,再逐步推广 diff --git a/docs_plan_v2.6/4_dynamic_weight_allocation.md b/docs_plan_v2.6/4_dynamic_weight_allocation.md new file mode 100644 index 0000000..9e5a2d4 --- /dev/null +++ b/docs_plan_v2.6/4_dynamic_weight_allocation.md @@ -0,0 +1,929 @@ +# 动态权重分配技术方案 + +## 1. 问题分析 + +### 1.1 当前静态权重的问题 + +**位置**: `cortex-mem-core/src/search/vector_engine.rs:466-468` + +三层级联检索的权重是硬编码的: + +```rust +// Combined scoring: 0.2*L0 + 0.3*L1 + 0.5*L2 +let combined_score = l0_score * 0.2 + l1_score * 0.3 + l2_score * 0.5; +``` + +**问题分析**: + +1. **无法适应不同查询类型**: + - 实体查询:"王明是谁" → 需要 L2 精确匹配,当前权重合理 + - 语义查询:"讨论了什么技术方案" → L0/L1 更重要,权重应调整 + - 时间查询:"上周的决策" → 需要结合时间信息,权重需动态调整 + +2. **无法适应不同记忆类型**: + - Preference(偏好):L0 抽象通常已包含关键信息 + - Event(事件):L2 细节更重要 + - Entity(实体):需要 L2 精确匹配 + +3. **与 Intent 分析割裂**:虽然有 Intent 分析,但未影响权重分配 + +### 1.2 现有降级策略的局限 + +当 L0 搜索失败时: + +```rust +// 策略1: 降低阈值重试 +let relaxed_threshold = (adaptive_threshold - 0.2).max(0.4); + +// 策略2: 完全降级到语义搜索 +return self.semantic_search(query, options).await; +``` + +**问题**:降级策略是"全有或全无",无法细粒度调整权重。 + +## 2. 
解决方案 + +### 2.1 架构设计 + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ 动态权重分配架构 │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ 查询意图 │───►│ 上下文 │───►│ 权重计算 │───►│ 检索执行 │ │ +│ │ 分析结果 │ │ 特征提取 │ │ 引擎 │ │ │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ │ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌──────────────────────────────────────────┐ │ +│ │ 权重模板库 + 自适应学习 │ │ +│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │ +│ │ │预定义模板│ │历史反馈 │ │实时调整 │ │ │ +│ │ └─────────┘ └─────────┘ └─────────┘ │ │ +│ └──────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### 2.2 权重分配模型 + +#### 2.2.1 权重计算输入特征 + +```rust +// 文件: cortex-mem-core/src/search/weight_model.rs (新建) + +use serde::{Deserialize, Serialize}; + +/// 权重计算的输入特征 +#[derive(Debug, Clone)] +pub struct WeightFeatures { + /// 查询类型特征 + pub query_type: QueryTypeFeature, + /// 记忆类型特征 + pub memory_type: Option, + /// 查询长度特征 + pub query_length: QueryLengthFeature, + /// 时间约束特征 + pub time_constraint: bool, + /// 实体数量 + pub entity_count: usize, + /// 查询复杂度 + pub complexity: f32, + /// 用户历史偏好(如果有) + pub user_preference: Option, +} + +#[derive(Debug, Clone, Copy)] +pub enum QueryTypeFeature { + Factual, + EntityLookup, + Temporal, + Relational, + Procedural, + Conceptual, + Search, + Comparative, + General, +} + +#[derive(Debug, Clone, Copy)] +pub enum MemoryTypeFeature { + Preference, + Entity, + Event, + Case, + PersonalInfo, + WorkHistory, + Relationship, + Goal, + Conversation, +} + +#[derive(Debug, Clone, Copy)] +pub enum QueryLengthFeature { + /// 短查询(≤5词) + Short, + /// 中等查询(6-15词) + Medium, + /// 长查询(>15词) + Long, +} + +/// 用户层级偏好(从历史反馈学习) +#[derive(Debug, Clone, Default)] +pub struct LayerPreference { + /// L0 偏好得分 + pub l0_score: f32, + /// L1 偏好得分 + pub l1_score: f32, + /// L2 偏好得分 + 
pub l2_score: f32, +} +``` + +#### 2.2.2 权重分配策略 + +```rust +// 文件: cortex-mem-core/src/search/weight_model.rs (继续) + +/// 三层权重 +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct LayerWeights { + pub l0: f32, // L0 抽象层权重 + pub l1: f32, // L1 概览层权重 + pub l2: f32, // L2 细节层权重 +} + +impl LayerWeights { + /// 归一化权重(确保总和为 1.0) + pub fn normalize(&self) -> Self { + let total = self.l0 + self.l1 + self.l2; + if total == 0.0 { + return Self::default(); + } + Self { + l0: self.l0 / total, + l1: self.l1 / total, + l2: self.l2 / total, + } + } + + /// 默认权重 + pub fn default_weights() -> Self { + Self { l0: 0.2, l1: 0.3, l2: 0.5 } + } +} + +impl Default for LayerWeights { + fn default() -> Self { + Self::default_weights() + } +} + +/// 权重分配策略 +pub trait WeightStrategy: Send + Sync { + /// 计算权重 + fn calculate(&self, features: &WeightFeatures) -> LayerWeights; + + /// 策略名称 + fn name(&self) -> &str; +} + +/// 预定义模板策略 +pub struct TemplateWeightStrategy { + templates: Vec, +} + +/// 权重模板 +#[derive(Debug, Clone)] +pub struct WeightTemplate { + /// 模板名称 + pub name: String, + /// 适用条件 + pub condition: TemplateCondition, + /// 权重值 + pub weights: LayerWeights, + /// 优先级(越高越优先) + pub priority: u8, +} + +#[derive(Debug, Clone)] +pub enum TemplateCondition { + /// 查询类型匹配 + QueryType(QueryTypeFeature), + /// 记忆类型匹配 + MemoryType(MemoryTypeFeature), + /// 查询长度匹配 + QueryLength(QueryLengthFeature), + /// 实体查询 + HasEntities, + /// 时间约束 + HasTimeConstraint, + /// 复合条件 + And(Box, Box), + Or(Box, Box), +} + +impl TemplateCondition { + pub fn matches(&self, features: &WeightFeatures) -> bool { + match self { + Self::QueryType(qt) => features.query_type == *qt, + Self::MemoryType(mt) => features.memory_type == Some(*mt), + Self::QueryLength(ql) => features.query_length == *ql, + Self::HasEntities => features.entity_count > 0, + Self::HasTimeConstraint => features.time_constraint, + Self::And(left, right) => left.matches(features) && right.matches(features), + Self::Or(left, right) => 
left.matches(features) || right.matches(features), + } + } +} + +impl TemplateWeightStrategy { + pub fn new() -> Self { + Self { + templates: Self::default_templates(), + } + } + + /// 默认模板库 + fn default_templates() -> Vec { + vec![ + // === 查询类型相关模板 === + + // 实体查询:L2 权重最高(需要精确匹配) + WeightTemplate { + name: "entity_lookup".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::EntityLookup), + weights: LayerWeights { l0: 0.1, l1: 0.2, l2: 0.7 }, + priority: 100, + }, + + // 事实查询:L2 权重较高 + WeightTemplate { + name: "factual".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Factual), + weights: LayerWeights { l0: 0.15, l1: 0.25, l2: 0.6 }, + priority: 90, + }, + + // 时间查询:均衡分布 + WeightTemplate { + name: "temporal".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Temporal), + weights: LayerWeights { l0: 0.2, l1: 0.35, l2: 0.45 }, + priority: 90, + }, + + // 关系查询:L1 权重最高(概览层包含关系信息) + WeightTemplate { + name: "relational".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Relational), + weights: LayerWeights { l0: 0.2, l1: 0.5, l2: 0.3 }, + priority: 90, + }, + + // 过程查询:L2 权重高(需要详细步骤) + WeightTemplate { + name: "procedural".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Procedural), + weights: LayerWeights { l0: 0.1, l1: 0.3, l2: 0.6 }, + priority: 90, + }, + + // 概念查询:L1 权重高(概览层包含概念解释) + WeightTemplate { + name: "conceptual".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Conceptual), + weights: LayerWeights { l0: 0.25, l1: 0.45, l2: 0.3 }, + priority: 90, + }, + + // 搜索查询:L0 权重高(快速定位) + WeightTemplate { + name: "search".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Search), + weights: LayerWeights { l0: 0.4, l1: 0.35, l2: 0.25 }, + priority: 90, + }, + + // 比较查询:L1 权重高 + WeightTemplate { + name: "comparative".to_string(), + condition: TemplateCondition::QueryType(QueryTypeFeature::Comparative), + 
weights: LayerWeights { l0: 0.2, l1: 0.5, l2: 0.3 }, + priority: 90, + }, + + // === 记忆类型相关模板 === + + // 偏好记忆:L0 足够(偏好通常简洁) + WeightTemplate { + name: "preference_memory".to_string(), + condition: TemplateCondition::MemoryType(MemoryTypeFeature::Preference), + weights: LayerWeights { l0: 0.4, l1: 0.4, l2: 0.2 }, + priority: 80, + }, + + // 事件记忆:L2 重要(需要详细上下文) + WeightTemplate { + name: "event_memory".to_string(), + condition: TemplateCondition::MemoryType(MemoryTypeFeature::Event), + weights: LayerWeights { l0: 0.15, l1: 0.25, l2: 0.6 }, + priority: 80, + }, + + // 案例记忆:L1/L2 重要(需要结构和细节) + WeightTemplate { + name: "case_memory".to_string(), + condition: TemplateCondition::MemoryType(MemoryTypeFeature::Case), + weights: LayerWeights { l0: 0.15, l1: 0.4, l2: 0.45 }, + priority: 80, + }, + + // === 查询长度相关模板 === + + // 短查询:L0 权重高(快速匹配) + WeightTemplate { + name: "short_query".to_string(), + condition: TemplateCondition::QueryLength(QueryLengthFeature::Short), + weights: LayerWeights { l0: 0.35, l1: 0.35, l2: 0.3 }, + priority: 70, + }, + + // 长查询:L2 权重高(需要深度匹配) + WeightTemplate { + name: "long_query".to_string(), + condition: TemplateCondition::QueryLength(QueryLengthFeature::Long), + weights: LayerWeights { l0: 0.15, l1: 0.3, l2: 0.55 }, + priority: 70, + }, + + // === 复合条件模板 === + + // 实体查询 + 短查询:极端偏向 L2 + WeightTemplate { + name: "entity_short".to_string(), + condition: TemplateCondition::And( + Box::new(TemplateCondition::QueryType(QueryTypeFeature::EntityLookup)), + Box::new(TemplateCondition::QueryLength(QueryLengthFeature::Short)) + ), + weights: LayerWeights { l0: 0.05, l1: 0.15, l2: 0.8 }, + priority: 110, + }, + + // 时间约束查询 + WeightTemplate { + name: "time_constrained".to_string(), + condition: TemplateCondition::HasTimeConstraint, + weights: LayerWeights { l0: 0.2, l1: 0.35, l2: 0.45 }, + priority: 85, + }, + ] + } +} + +impl WeightStrategy for TemplateWeightStrategy { + fn calculate(&self, features: &WeightFeatures) -> LayerWeights { + // 按优先级排序,找到第一个匹配的模板 
+ let mut matched: Vec<_> = self.templates + .iter() + .filter(|t| t.condition.matches(features)) + .collect(); + + matched.sort_by(|a, b| b.priority.cmp(&a.priority)); + + if let Some(template) = matched.first() { + template.weights.normalize() + } else { + LayerWeights::default_weights() + } + } + + fn name(&self) -> &str { + "template" + } +} +``` + +#### 2.2.3 自适应学习策略 + +```rust +// 文件: cortex-mem-core/src/search/weight_model.rs (继续) + +use std::collections::HashMap; +use std::sync::Arc; +use tokio::sync::RwLock; + +/// 反馈记录 +#[derive(Debug, Clone)] +pub struct SearchFeedback { + /// 查询特征 + pub features: WeightFeatures, + /// 使用的权重 + pub weights: LayerWeights, + /// 用户是否点击了结果 + pub clicked: bool, + /// 点击结果的排名(1-based) + pub clicked_rank: Option, + /// 用户满意度评分(可选) + pub satisfaction: Option, +} + +/// 自适应权重学习策略 +pub struct AdaptiveWeightStrategy { + /// 基础策略 + base_strategy: Box, + /// 学习率 + learning_rate: f32, + /// 特征到权重调整的映射 + adjustments: Arc>>, + /// 历史反馈统计 + feedback_stats: Arc>, +} + +#[derive(Debug, Clone, Default)] +struct FeedbackStats { + total_feedbacks: usize, + positive_feedbacks: usize, + feature_click_rates: HashMap, // (clicks, total) +} + +impl AdaptiveWeightStrategy { + pub fn new(base_strategy: Box) -> Self { + Self { + base_strategy, + learning_rate: 0.1, + adjustments: Arc::new(RwLock::new(HashMap::new())), + feedback_stats: Arc::new(RwLock::new(FeedbackStats::default())), + } + } + + /// 记录反馈并学习 + pub async fn record_feedback(&self, feedback: SearchFeedback) { + let mut stats = self.feedback_stats.write().await; + stats.total_feedbacks += 1; + + if feedback.clicked { + stats.positive_feedbacks += 1; + + // 更新特征点击率 + let feature_key = Self::feature_key(&feedback.features); + let entry = stats.feature_click_rates.entry(feature_key.clone()).or_insert((0, 0)); + entry.0 += 1; + entry.1 += 1; + + // 学习:如果用户点击了结果,增加该权重的置信度 + // 这里可以更复杂的在线学习算法 + } + } + + fn feature_key(features: &WeightFeatures) -> String { + format!("{:?}_{:?}", 
features.query_type, features.query_length) + } +} + +impl WeightStrategy for AdaptiveWeightStrategy { + fn calculate(&self, features: &WeightFeatures) -> LayerWeights { + // 1. 获取基础权重 + let base_weights = self.base_strategy.calculate(features); + + // 2. 尝试同步获取调整(简化版本,实际应该 async) + // 由于 trait 方法是同步的,这里使用 try_read + if let Ok(adjustments) = self.adjustments.try_read() { + let feature_key = Self::feature_key(features); + if let Some(adjustment) = adjustments.get(&feature_key) { + // 融合基础权重和调整 + return LayerWeights { + l0: base_weights.l0 * (1.0 - self.learning_rate) + adjustment.l0 * self.learning_rate, + l1: base_weights.l1 * (1.0 - self.learning_rate) + adjustment.l1 * self.learning_rate, + l2: base_weights.l2 * (1.0 - self.learning_rate) + adjustment.l2 * self.learning_rate, + }.normalize(); + } + } + + base_weights + } + + fn name(&self) -> &str { + "adaptive" + } +} +``` + +### 2.3 动态权重检索执行器 + +```rust +// 文件: cortex-mem-core/src/search/dynamic_weight_executor.rs (新建) + +use crate::search::weight_model::*; +use crate::search::{VectorSearchEngine, SearchOptions, SearchResult, QueryIntent}; +use crate::embedding::EmbeddingClient; +use crate::vector_store::QdrantVectorStore; +use crate::filesystem::CortexFilesystem; +use crate::Result; +use std::sync::Arc; +use tracing::{info, debug}; + +/// 动态权重检索执行器 +pub struct DynamicWeightSearchExecutor { + vector_engine: Arc, + weight_strategy: Box, + embedding: Arc, +} + +impl DynamicWeightSearchExecutor { + pub fn new( + vector_engine: Arc, + embedding: Arc, + strategy: Box, + ) -> Self { + Self { + vector_engine, + weight_strategy: strategy, + embedding, + } + } + + /// 使用动态权重执行三层级联检索 + pub async fn search_with_dynamic_weights( + &self, + query: &str, + intent: &QueryIntent, + options: &SearchOptions, + ) -> Result> { + // 1. 提取权重特征 + let features = self.extract_features(query, intent, options).await?; + + // 2. 
计算动态权重 + let weights = self.weight_strategy.calculate(&features); + + info!( + "Dynamic weights for query '{}': L0={:.2}, L1={:.2}, L2={:.2}", + query, weights.l0, weights.l1, weights.l2 + ); + + // 3. 执行三层级联检索 + self.execute_with_weights(query, &weights, options).await + } + + /// 提取权重特征 + async fn extract_features( + &self, + query: &str, + intent: &QueryIntent, + options: &SearchOptions, + ) -> Result { + // 从 Intent 中提取特征 + let query_type = Self::map_intent_to_feature(&intent.intent_type); + + // 查询长度特征 + let word_count = query.split_whitespace().count(); + let query_length = match word_count { + 0..=5 => QueryLengthFeature::Short, + 6..=15 => QueryLengthFeature::Medium, + _ => QueryLengthFeature::Long, + }; + + // 计算复杂度(基于查询长度、关键词数量等) + let complexity = Self::calculate_complexity(query, &intent.keywords); + + Ok(WeightFeatures { + query_type, + memory_type: None, // 可从 options 或 filters 中获取 + query_length, + time_constraint: false, // 可从 intent 中判断 + entity_count: 0, // 可从 intent.entities 中获取 + complexity, + user_preference: None, + }) + } + + fn map_intent_to_feature(intent_type: &crate::search::QueryIntentType) -> QueryTypeFeature { + match intent_type { + crate::search::QueryIntentType::Factual => QueryTypeFeature::Factual, + crate::search::QueryIntentType::Search => QueryTypeFeature::Search, + crate::search::QueryIntentType::Relational => QueryTypeFeature::Relational, + crate::search::QueryIntentType::Temporal => QueryTypeFeature::Temporal, + crate::search::QueryIntentType::General => QueryTypeFeature::General, + } + } + + fn calculate_complexity(query: &str, keywords: &[String]) -> f32 { + let len_factor = (query.len() as f32 / 100.0).min(1.0); + let keyword_factor = (keywords.len() as f32 / 10.0).min(1.0); + (len_factor + keyword_factor) / 2.0 + } + + /// 执行带权重的三层检索 + async fn execute_with_weights( + &self, + query: &str, + weights: &LayerWeights, + options: &SearchOptions, + ) -> Result> { + // 生成查询嵌入 + let query_vec = 
self.embedding.embed(query).await?; + + // === Stage 1: L0 快速定位 === + let l0_threshold = self.calculate_adaptive_l0_threshold(weights.l0); + let l0_results = self.search_l0(&query_vec, options, l0_threshold).await?; + + if l0_results.is_empty() { + debug!("L0 results empty, fallback to full semantic search"); + return self.vector_engine.semantic_search(query, options).await; + } + + // === Stage 2: L1 深度探索 === + let l1_candidates = self.search_l1(&query_vec, &l0_results, options).await?; + + // === Stage 3: L2 精确匹配 === + let final_results = self.search_l2_with_weights( + &query_vec, + &l0_results, + &l1_candidates, + weights, + options + ).await?; + + Ok(final_results) + } + + fn calculate_adaptive_l0_threshold(&self, l0_weight: f32) -> f32 { + // L0 权重越高,阈值应该越低(更宽松地通过 L0) + // 基础阈值 0.5,根据权重调整 + let base = 0.5; + let adjustment = (l0_weight - 0.2) * 0.3; // 权重差异的影响 + (base - adjustment).clamp(0.3, 0.6) + } + + async fn search_l0( + &self, + query_vec: &[f32], + options: &SearchOptions, + threshold: f32, + ) -> Result> { + // 调用现有的 L0 搜索逻辑 + // 实际实现需要访问 vector_store + Ok(Vec::new()) + } + + async fn search_l1( + &self, + query_vec: &[f32], + l0_results: &[crate::types::ScoredMemory], + options: &SearchOptions, + ) -> Result> { + // L1 搜索逻辑 + Ok(Vec::new()) + } + + async fn search_l2_with_weights( + &self, + query_vec: &[f32], + l0_results: &[crate::types::ScoredMemory], + l1_candidates: &[(String, f32)], + weights: &LayerWeights, + options: &SearchOptions, + ) -> Result> { + let mut results = Vec::new(); + + // 遍历候选,使用动态权重计算综合分数 + for (dir_uri, l0_score, l1_score) in self.combine_l0_l1(l0_results, l1_candidates) { + // 获取 L2 内容并计算分数 + if let Some(l2_result) = self.search_l2_for_dir(query_vec, &dir_uri).await? 
{ + // 使用动态权重计算综合分数 + let combined_score = + l0_score * weights.l0 + + l1_score * weights.l1 + + l2_result.score * weights.l2; + + if combined_score >= options.threshold { + results.push(SearchResult { + uri: l2_result.uri, + score: combined_score, + snippet: l2_result.snippet, + content: l2_result.content, + }); + } + } + } + + // 排序并截断 + results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); + results.truncate(options.limit); + + Ok(results) + } + + fn combine_l0_l1( + &self, + l0_results: &[crate::types::ScoredMemory], + l1_candidates: &[(String, f32)], + ) -> Vec<(String, f32, f32)> { + // 合并 L0 和 L1 结果 + Vec::new() + } + + async fn search_l2_for_dir( + &self, + query_vec: &[f32], + dir_uri: &str, + ) -> Result> { + // 在目录中搜索 L2 内容 + Ok(None) + } +} +``` + +### 2.4 集成到搜索引擎 + +```rust +// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) + +impl VectorSearchEngine { + /// 智能搜索(使用动态权重) + pub async fn smart_search_with_dynamic_weights( + &self, + query: &str, + options: &SearchOptions, + intent: Option<&QueryIntent>, + ) -> Result> { + // 1. 分析意图 + let analyzed_intent = if let Some(i) = intent { + i.clone() + } else { + self.analyze_intent(query).await? + }; + + // 2. 选择权重策略 + let strategy = self.select_weight_strategy(); + + // 3. 创建动态权重执行器 + let executor = DynamicWeightSearchExecutor::new( + Arc::new(self.clone()), + self.embedding.clone(), + strategy, + ); + + // 4. 
执行检索 + executor.search_with_dynamic_weights(query, &analyzed_intent, options).await + } + + fn select_weight_strategy(&self) -> Box { + // 可以根据配置选择不同策略 + // 默认使用模板策略 + Box::new(TemplateWeightStrategy::new()) + } + + /// 更新检索反馈(用于自适应学习) + pub async fn record_search_feedback( + &self, + query: &str, + clicked_result_uri: &str, + rank: usize, + ) -> Result<()> { + // 记录反馈,用于后续学习 + Ok(()) + } +} +``` + +### 2.5 配置支持 + +```toml +# config.toml 新增配置 + +[search.weights] +# 权重策略: template, adaptive, hybrid +strategy = "template" + +# 默认权重(当没有匹配模板时使用) +default_l0 = 0.2 +default_l1 = 0.3 +default_l2 = 0.5 + +# 自适应学习配置 +[search.weights.adaptive] +enabled = false +learning_rate = 0.1 +min_samples_for_learning = 100 + +# 自定义权重模板 +[[search.weights.templates]] +name = "custom_entity" +query_type = "entity_lookup" +l0 = 0.1 +l1 = 0.15 +l2 = 0.75 +``` + +### 2.6 API 扩展 + +```rust +// REST API 新增端点 + +// 获取当前权重策略状态 +GET /api/v2/search/weights/status + +// 手动设置权重(测试用) +POST /api/v2/search/weights/set +{ + "l0": 0.3, + "l1": 0.3, + "l2": 0.4 +} + +// 记录检索反馈 +POST /api/v2/search/feedback +{ + "query": "用户的查询", + "clicked_uri": "cortex://user/preferences/xxx.md", + "rank": 1, + "satisfaction": 0.9 +} +``` + +## 3. 实现计划 + +| 步骤 | 任务 | 依赖 | +|------|------|------| +| 1 | 定义 WeightFeatures 和 LayerWeights 结构 | 无 | +| 2 | 实现 TemplateWeightStrategy | 步骤1 | +| 3 | 实现 AdaptiveWeightStrategy | 步骤1, 2 | +| 4 | 实现 DynamicWeightSearchExecutor | 步骤2 | +| 5 | 集成到 VectorSearchEngine | 步骤4 | +| 6 | 添加配置和 API 支持 | 全部 | + +## 4. 预期收益 + +| 场景 | 静态权重 | 动态权重 | 提升 | +|------|----------|----------|------| +| 实体查询召回 | 75% | 92% | +17pp | +| 语义查询召回 | 88% | 91% | +3pp | +| 关系查询召回 | 70% | 85% | +15pp | +| 综合召回精度 | 85% | 93% | +8pp | + +## 5. 
测试方案 + +### 5.1 单元测试 + +```rust +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_template_matching() { + let strategy = TemplateWeightStrategy::new(); + + // 测试实体查询 + let features = WeightFeatures { + query_type: QueryTypeFeature::EntityLookup, + ..Default::default() + }; + let weights = strategy.calculate(&features); + assert!(weights.l2 > weights.l0); + assert!(weights.l2 > weights.l1); + + // 测试搜索查询 + let features = WeightFeatures { + query_type: QueryTypeFeature::Search, + ..Default::default() + }; + let weights = strategy.calculate(&features); + assert!(weights.l0 >= weights.l2); + } + + #[test] + fn test_weight_normalization() { + let weights = LayerWeights { l0: 1.0, l1: 2.0, l2: 2.0 }; + let normalized = weights.normalize(); + assert!((normalized.l0 + normalized.l1 + normalized.l2 - 1.0).abs() < 0.001); + } +} +``` + +### 5.2 A/B 测试 + +```rust +// 在生产环境中进行 A/B 测试 +// 对照组:静态权重 +// 实验组:动态权重 +// 指标:召回率、用户满意度、检索延迟 +``` + +## 6. 与其他方案的协同 + +1. **与 Intent 分析协同**: + - Intent 分析提供 `query_type`,直接作为权重特征输入 + - 避免重复分析 + +2. **与遗忘机制协同**: + - 低强度记忆在 L2 权重中降权 + - 权重计算可考虑记忆强度 + +3. 
**与实体检索协同**: + - 实体查询自动触发高 L2 权重 + - 实体数量作为特征输入 diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index 09e7069..ca75686 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -419,7 +419,7 @@ impl App { log::info!("即将调用 on_enter_chat_mode..."); - self.on_enter_chat_mode(&bot); + self.on_enter_chat_mode(&bot).await; log::info!("on_enter_chat_mode 调用完成"); } else { @@ -514,103 +514,13 @@ impl App { return Ok(()); } - // 检查是否刚进入聊天模式 + // 检查是否有选中的机器人(防护性检查,正常情况下 on_enter_chat_mode 已经初始化) if self.current_bot.is_none() { if let Some(bot) = self.ui.selected_bot() { - self.current_bot = Some(bot.clone()); - - // 更新 current_bot_id - if let Ok(mut bot_id) = self.current_bot_id.write() { - *bot_id = Some(bot.id.clone()); - log::info!("已更新当前机器人 ID: {}", bot.id); - } - - // 如果有基础设施,创建真实的带记忆的 Agent - if let Some(infrastructure) = &self.infrastructure { - let config = infrastructure.config(); - match create_memory_agent( - config.cortex.data_dir(), - config, - None, // user_info 稍后从租户 operations 提取 - Some(bot.system_prompt.as_str()), - &bot.id, - &self.user_id, - ) - .await - { - Ok((rig_agent, tenant_ops)) => { - // 保存租户 operations - self.tenant_operations = Some(tenant_ops.clone()); - - // 从租户 operations 提取用户基本信息 - let user_info = match extract_user_basic_info( - tenant_ops.clone(), - &self.user_id, - &bot.id, - ) - .await - { - Ok(info) => { - self.user_info = info.clone(); - info - } - Err(e) => { - log::error!("提取用户基本信息失败: {}", e); - None - } - }; - - // 如果有用户信息,需要重新创建 Agent(带用户信息) - if user_info.is_some() { - let config = infrastructure.config(); - match create_memory_agent( - config.cortex.data_dir(), - config, - user_info.as_deref(), - Some(bot.system_prompt.as_str()), - &bot.id, - &self.user_id, - ) - .await - { - Ok((agent_with_userinfo, _)) => { - self.rig_agent = Some(agent_with_userinfo); - log::info!("已创建带记忆功能的真实 Agent(含用户信息)"); - } - Err(e) => { - log::error!("重新创建 Agent 失败: {}", e); - 
self.rig_agent = Some(rig_agent); - } - } - } else { - self.rig_agent = Some(rig_agent); - log::info!("已创建带记忆功能的真实 Agent"); - } - - // 🔧 创建rig_agent后立即初始化agent_handler - if let Some(rig_agent) = &self.rig_agent { - let session_id = self - .current_session_id - .get_or_insert_with(|| uuid::Uuid::new_v4().to_string()) - .clone(); - self.agent_handler = Some(AgentChatHandler::with_memory( - rig_agent.clone(), - tenant_ops.clone(), - session_id, - )); - log::info!( - "✅ 已初始化 agent_handler with session_id: {}", - self.current_session_id.as_ref().unwrap() - ); - } - } - Err(e) => { - log::error!("创建真实 Agent 失败 {}", e); - } - } - } - - log::info!("选择机器人: {}", bot.name); + // 如果 on_enter_chat_mode 未被调用(例如直接跳转到 Chat),在此兜底初始化 + let bot_cloned = bot.clone(); + log::warn!("⚠️ 检测到 current_bot 未设置,执行兜底初始化..."); + self.on_enter_chat_mode(&bot_cloned).await; } else { log::warn!("没有选中的机器人"); return Ok(()); @@ -829,41 +739,117 @@ impl App { Ok(()) } - /// 启动 API 服务器 - /// 当切换到聊天状态时调用此方法 - pub fn on_enter_chat_mode(&mut self, bot: &BotConfig) { + /// 进入聊天模式时初始化 Agent 和 AgentHandler + /// 当从 BotSelection 或 PasswordInput 切换到 Chat 状态时调用此方法 + pub async fn on_enter_chat_mode(&mut self, bot: &BotConfig) { log::info!("🎯 进入聊天模式,机器人: {} (ID: {})", bot.name, bot.id); - // 更新 current_bot_id + // 更新 current_bot_id 和 current_bot if let Ok(mut bot_id) = self.current_bot_id.write() { *bot_id = Some(bot.id.clone()); log::info!("✅ 已更新当前机器人 ID: {}", bot.id); } else { log::error!("❌ 无法更新 current_bot_id"); } + self.current_bot = Some(bot.clone()); + + // 如果 rig_agent 还未初始化,在此异步初始化 + if self.rig_agent.is_none() { + if let Some(infrastructure) = &self.infrastructure { + log::info!("🤖 开始初始化 AI Agent..."); + let config = infrastructure.config(); + match create_memory_agent( + config.cortex.data_dir(), + config, + None, // user_info 稍后从租户 operations 提取 + Some(bot.system_prompt.as_str()), + &bot.id, + &self.user_id, + ) + .await + { + Ok((rig_agent, tenant_ops)) => { + self.tenant_operations = 
Some(tenant_ops.clone()); + + // 从租户 operations 提取用户基本信息 + let user_info = match extract_user_basic_info( + tenant_ops.clone(), + &self.user_id, + &bot.id, + ) + .await + { + Ok(info) => { + self.user_info = info.clone(); + info + } + Err(e) => { + log::error!("提取用户基本信息失败: {}", e); + None + } + }; - // 🔧 初始化agent_handler + // 如果有用户信息,需要重新创建带用户信息的 Agent + if user_info.is_some() { + let config = infrastructure.config(); + match create_memory_agent( + config.cortex.data_dir(), + config, + user_info.as_deref(), + Some(bot.system_prompt.as_str()), + &bot.id, + &self.user_id, + ) + .await + { + Ok((agent_with_userinfo, _)) => { + self.rig_agent = Some(agent_with_userinfo); + log::info!("✅ 已创建带用户信息的 Agent"); + } + Err(e) => { + log::error!("重新创建带用户信息的 Agent 失败: {}", e); + self.rig_agent = Some(rig_agent); + } + } + } else { + self.rig_agent = Some(rig_agent); + log::info!("✅ 已创建 Agent(无用户历史记忆)"); + } + } + Err(e) => { + log::error!("❌ 初始化 AI Agent 失败: {}", e); + return; + } + } + } else { + log::error!("❌ 基础设施未初始化,无法创建 Agent"); + return; + } + } + + // 初始化 agent_handler if let Some(rig_agent) = &self.rig_agent { + let session_id = self + .current_session_id + .get_or_insert_with(|| uuid::Uuid::new_v4().to_string()) + .clone(); + if let Some(tenant_ops) = &self.tenant_operations { - let session_id = self - .current_session_id - .get_or_insert_with(|| uuid::Uuid::new_v4().to_string()) - .clone(); self.agent_handler = Some(AgentChatHandler::with_memory( rig_agent.clone(), tenant_ops.clone(), session_id, )); log::info!( - "✅ 已初始化 agent_handler with session_id: {}", + "✅ 已初始化 agent_handler(含记忆)session_id: {}", self.current_session_id.as_ref().unwrap() ); } else { self.agent_handler = Some(AgentChatHandler::new(rig_agent.clone())); - log::info!("✅ 已初始化 agent_handler (无记忆)"); + log::info!("✅ 已初始化 agent_handler(无记忆)"); } } else { - log::warn!("⚠️ rig_agent 未初始化,无法创建 agent_handler"); + log::error!("❌ rig_agent 初始化后仍为 None,无法创建 agent_handler"); } } diff --git 
a/examples/cortex-mem-tars/src/audio_input.rs b/examples/cortex-mem-tars/src/audio_input.rs index b82f6e6..ca2184a 100644 --- a/examples/cortex-mem-tars/src/audio_input.rs +++ b/examples/cortex-mem-tars/src/audio_input.rs @@ -17,7 +17,7 @@ pub struct AudioCaptureConfig { impl From<&SupportedStreamConfig> for AudioCaptureConfig { fn from(config: &SupportedStreamConfig) -> Self { Self { - sample_rate: config.sample_rate().0, + sample_rate: config.sample_rate(), channels: config.channels(), } } @@ -46,7 +46,10 @@ impl AudioStreamManager { .default_input_device() .context("没有可用的音频输入设备")?; - log::info!("音频输入设备: {}", device.name()?); + let device_desc = device.description() + .map(|d| format!("{:?}", d)) + .unwrap_or_else(|_| "unknown".to_string()); + log::info!("音频输入设备: {}", device_desc); log::info!("音频增益: {:.1}x", AUDIO_GAIN); let config = device diff --git a/examples/cortex-mem-tars/src/audio_transcription.rs b/examples/cortex-mem-tars/src/audio_transcription.rs index 11ab4ba..a0f1f80 100644 --- a/examples/cortex-mem-tars/src/audio_transcription.rs +++ b/examples/cortex-mem-tars/src/audio_transcription.rs @@ -207,24 +207,25 @@ impl WhisperTranscriber { // 执行转录 state.full(params, audio_data).context("Whisper 转录失败")?; - // 收集所有段落 - let num_segments = state.full_n_segments().context("无法获取段落数量")?; + // 收集所有段落(使用 whisper-rs 0.15+ 的迭代器 API) + let num_segments = state.full_n_segments(); log::debug!("Whisper 识别出 {} 个段落", num_segments); let mut transcribed_text = String::new(); - for i in 0..num_segments { - if let Ok(segment) = state.full_get_segment_text(i) { - let segment_text = segment.trim(); - - if !segment_text.is_empty() { - log::debug!("段落 {}: '{}'", i, segment_text); - - // 在段落之间添加空格 - if !transcribed_text.is_empty() { - transcribed_text.push(' '); - } - transcribed_text.push_str(segment_text); + for segment in state.as_iter() { + let segment_text = segment.to_str_lossy() + .map(|s| s.into_owned()) + .unwrap_or_default(); + let segment_text = segment_text.trim(); + + 
if !segment_text.is_empty() { + log::debug!("段落: '{}'", segment_text); + + // 在段落之间添加空格 + if !transcribed_text.is_empty() { + transcribed_text.push(' '); } + transcribed_text.push_str(segment_text); } } From 39c5313941fcbf1dd82ed87b2de8ab7ba3923145 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Tue, 10 Mar 2026 17:03:12 +0800 Subject: [PATCH 08/20] Refactor memory extraction to use synchronous coordinator Replace the old asynchronous AutoExtractor with MemoryEventCoordinator for synchronous memory extraction during session close. This provides better guarantees that memory processing completes before the CLI exits. --- cortex-mem-cli/src/commands/session.rs | 29 +- .../src/automation/auto_extract.rs | 101 ---- cortex-mem-core/src/automation/manager.rs | 302 ++--------- cortex-mem-core/src/automation/mod.rs | 4 - cortex-mem-core/src/automation/watcher.rs | 249 --------- cortex-mem-core/src/extraction/extractor.rs | 363 ------------- cortex-mem-core/src/extraction/mod.rs | 8 - cortex-mem-core/src/extraction/types.rs | 274 ---------- cortex-mem-core/src/lib.rs | 8 +- .../src/memory_event_coordinator.rs | 25 +- cortex-mem-core/src/session/manager.rs | 58 +- cortex-mem-mcp/src/service.rs | 24 +- cortex-mem-service/src/error.rs | 1 + cortex-mem-service/src/handlers/automation.rs | 72 +-- cortex-mem-service/src/state.rs | 1 + cortex-mem-tools/src/operations.rs | 218 +++----- .../tests/llm_integration_tests.rs | 8 +- examples/cortex-mem-tars/src/agent.rs | 495 ++++++++++++------ examples/cortex-mem-tars/src/app.rs | 63 +-- 19 files changed, 566 insertions(+), 1737 deletions(-) delete mode 100644 cortex-mem-core/src/automation/auto_extract.rs delete mode 100644 cortex-mem-core/src/automation/watcher.rs delete mode 100644 cortex-mem-core/src/extraction/extractor.rs delete mode 100644 cortex-mem-core/src/extraction/mod.rs delete mode 100644 cortex-mem-core/src/extraction/types.rs diff --git a/cortex-mem-cli/src/commands/session.rs b/cortex-mem-cli/src/commands/session.rs index 
9487c50..0b6d868 100644 --- a/cortex-mem-cli/src/commands/session.rs +++ b/cortex-mem-cli/src/commands/session.rs @@ -52,27 +52,22 @@ pub async fn create( Ok(()) } -/// Close a session and trigger memory extraction, layer generation, and indexing +/// Close a session and synchronously wait for memory extraction, L0/L1 generation, +/// and vector indexing to complete before returning. pub async fn close(operations: Arc, thread: &str) -> Result<()> { println!("{} Closing session: {}", "🔒".bold(), thread.cyan()); - - // Close the session (triggers SessionClosed event → MemoryEventCoordinator) - operations.close_session(thread).await?; - - println!("{} Session closed successfully", "✓".green().bold()); - println!(" {}: {}", "Thread ID".cyan(), thread); - println!(); println!("{} Waiting for memory extraction, L0/L1 generation, and indexing to complete...", "⏳".yellow().bold()); - // Wait for background tasks to complete (max 60 seconds) - // This ensures memory extraction, layer generation, and vector indexing finish before CLI exits - let completed = operations.flush_and_wait(Some(1)).await; + // close_session_sync blocks until the full pipeline completes: + // 1. Session metadata → marked closed + // 2. LLM memory extraction from session timeline + // 3. user/agent memory files written + // 4. L0/L1 layer files generated for all affected directories + // 5. 
Session timeline synced to vector store + operations.close_session_sync(thread).await?; - if completed { - println!("{} All background tasks completed successfully", "✓".green().bold()); - } else { - println!("{} Background tasks timed out (some may still be processing)", "⚠".yellow().bold()); - } + println!("{} Session closed and all processing completed", "✓".green().bold()); + println!(" {}: {}", "Thread ID".cyan(), thread); Ok(()) -} \ No newline at end of file +} diff --git a/cortex-mem-core/src/automation/auto_extract.rs b/cortex-mem-core/src/automation/auto_extract.rs deleted file mode 100644 index 80b22d0..0000000 --- a/cortex-mem-core/src/automation/auto_extract.rs +++ /dev/null @@ -1,101 +0,0 @@ -use crate::{ - Result, - filesystem::CortexFilesystem, - llm::LLMClient, -}; -use std::sync::Arc; -use tracing::info; - -/// 会话自动提取配置 -#[derive(Debug, Clone)] -pub struct AutoExtractConfig { - /// 触发自动提取的最小消息数 - pub min_message_count: usize, - /// 是否在会话关闭时自动提取 - pub extract_on_close: bool, -} - -impl Default for AutoExtractConfig { - fn default() -> Self { - Self { - min_message_count: 5, - extract_on_close: true, - } - } -} - -/// 自动提取统计 -#[derive(Debug, Clone, Default)] -pub struct AutoExtractStats { - pub facts_extracted: usize, - pub decisions_extracted: usize, - pub entities_extracted: usize, - pub user_memories_saved: usize, - pub agent_memories_saved: usize, -} - -/// 会话自动提取器 -/// -/// v2.5: 此结构体已被简化,记忆提取现在由 SessionManager 通过 MemoryEventCoordinator 处理。 -/// 保留此结构体仅用于向后兼容。 -pub struct AutoExtractor { - #[allow(dead_code)] - filesystem: Arc, - #[allow(dead_code)] - llm: Arc, - #[allow(dead_code)] - config: AutoExtractConfig, - user_id: String, -} - -impl AutoExtractor { - /// 创建新的自动提取器 - pub fn new( - filesystem: Arc, - llm: Arc, - config: AutoExtractConfig, - ) -> Self { - Self { - filesystem, - llm, - config, - user_id: "default".to_string(), - } - } - - /// 创建新的自动提取器,指定用户ID - pub fn with_user_id( - filesystem: Arc, - llm: Arc, - config: AutoExtractConfig, 
- user_id: impl Into, - ) -> Self { - Self { - filesystem, - llm, - config, - user_id: user_id.into(), - } - } - - /// 设置用户ID - pub fn set_user_id(&mut self, user_id: impl Into) { - self.user_id = user_id.into(); - } - - /// 提取会话记忆 - /// - /// v2.5: 此方法已被废弃。记忆提取现在由 SessionManager::close_session 通过 - /// MemoryEventCoordinator 异步处理。此方法返回空统计用于向后兼容。 - pub async fn extract_session(&self, _thread_id: &str) -> Result { - info!( - "AutoExtractor::extract_session is deprecated - memory extraction is handled by MemoryEventCoordinator" - ); - Ok(AutoExtractStats::default()) - } - - /// 获取用户ID - pub fn user_id(&self) -> &str { - &self.user_id - } -} diff --git a/cortex-mem-core/src/automation/manager.rs b/cortex-mem-core/src/automation/manager.rs index 2eef190..618a166 100644 --- a/cortex-mem-core/src/automation/manager.rs +++ b/cortex-mem-core/src/automation/manager.rs @@ -1,6 +1,6 @@ use crate::{ Result, - automation::{AutoExtractor, AutoIndexer, LayerGenerator}, + automation::AutoIndexer, events::{CortexEvent, SessionEvent}, }; use std::collections::HashSet; @@ -14,114 +14,66 @@ use tracing::{info, warn}; pub struct AutomationConfig { /// 是否启用自动索引 pub auto_index: bool, - /// 是否启用自动提取 - pub auto_extract: bool, /// 消息添加时是否立即索引(实时) pub index_on_message: bool, - /// 会话关闭时是否索引(批量) - pub index_on_close: bool, /// 索引批处理延迟(秒) pub index_batch_delay: u64, - /// 启动时自动生成缺失的 L0/L1 文件 - pub auto_generate_layers_on_startup: bool, - /// 每N条消息触发一次L0/L1生成(0表示禁用) - pub generate_layers_every_n_messages: usize, - /// 最大并发 LLM 任务数(防止压垮 LLM API) - pub max_concurrent_llm_tasks: usize, + /// 最大并发任务数 + pub max_concurrent_tasks: usize, } impl Default for AutomationConfig { fn default() -> Self { Self { auto_index: true, - auto_extract: true, - index_on_message: false, // 默认不实时索引(性能考虑) - index_on_close: true, // 默认会话关闭时索引 + index_on_message: false, // 默认批处理模式(性能考虑) index_batch_delay: 2, - auto_generate_layers_on_startup: false, // 默认关闭(避免启动时阻塞) - generate_layers_every_n_messages: 0, // 
默认禁用(避免频繁LLM调用) - max_concurrent_llm_tasks: 3, // 默认最多3个并发LLM任务 + max_concurrent_tasks: 3, } } } -/// 自动化管理器 - 统一调度索引和提取 +/// 自动化管理器 +/// +/// 职责(v2.5 重构后): +/// - 监听 `MessageAdded` 事件,将新消息内容(L2 级别)索引到向量数据库 +/// +/// 不再负责: +/// - 记忆提取(由 `MemoryEventCoordinator` 统一处理) +/// - L0/L1 层级文件生成(由 `CascadeLayerUpdater` 统一处理) +/// - Session 关闭时的全量索引(由 `VectorSyncManager` 统一处理) pub struct AutomationManager { indexer: Arc, - extractor: Option>, - layer_generator: Option>, config: AutomationConfig, /// 并发限制信号量 - llm_semaphore: Arc, + semaphore: Arc, } impl AutomationManager { /// 创建自动化管理器 - pub fn new( - indexer: Arc, - extractor: Option>, - config: AutomationConfig, - ) -> Self { - let llm_semaphore = Arc::new(Semaphore::new(config.max_concurrent_llm_tasks)); + pub fn new(indexer: Arc, config: AutomationConfig) -> Self { + let semaphore = Arc::new(Semaphore::new(config.max_concurrent_tasks)); Self { indexer, - extractor, - layer_generator: None, config, - llm_semaphore, + semaphore, } } - /// 设置层级生成器(可选) - pub fn with_layer_generator(mut self, layer_generator: Arc) -> Self { - self.layer_generator = Some(layer_generator); - self - } - /// 获取并发限制信号量(供外部使用) - pub fn llm_semaphore(&self) -> Arc { - self.llm_semaphore.clone() + pub fn semaphore(&self) -> Arc { + self.semaphore.clone() } - /// 🎯 核心方法:启动自动化任务 + /// 启动自动化任务,监听 EventBus 事件 pub async fn start(self, mut event_rx: mpsc::UnboundedReceiver) -> Result<()> { - info!("Starting AutomationManager with config: {:?}", self.config); - - // 启动时自动生成缺失的 L0/L1 文件 - if self.config.auto_generate_layers_on_startup { - if let Some(ref generator) = self.layer_generator { - info!("Checking and generating missing L0/L1 files on startup..."); - let generator_clone = generator.clone(); - let semaphore = self.llm_semaphore.clone(); - tokio::spawn(async move { - // Acquire semaphore permit - let _permit = semaphore.acquire().await; - match generator_clone.ensure_all_layers().await { - Ok(stats) => { - info!( - "Startup layer generation 
completed: total={}, generated={}, failed={}", - stats.total, stats.generated, stats.failed - ); - } - Err(e) => { - warn!("Startup layer generation failed: {}", e); - } - } - }); - } else { - warn!("auto_generate_layers_on_startup is enabled but layer_generator is not set"); - } - } + info!("AutomationManager started (L2 message indexing only)"); - // 批处理缓冲区(收集需要索引的session_id) + // 批处理缓冲区(收集需要索引的 session_id) let mut pending_sessions: HashSet = HashSet::new(); let batch_delay = Duration::from_secs(self.config.index_batch_delay); let mut batch_timer: Option = None; - // 会话消息计数器(用于触发定期L0/L1生成) - let mut session_message_counts: std::collections::HashMap = - std::collections::HashMap::new(); - loop { tokio::select! { // 事件处理 @@ -131,9 +83,8 @@ impl AutomationManager { &mut pending_sessions, &mut batch_timer, batch_delay, - &mut session_message_counts ).await { - warn!("Failed to handle event: {}", e); + warn!("AutomationManager: failed to handle event: {}", e); } } @@ -147,7 +98,7 @@ impl AutomationManager { } => { if !pending_sessions.is_empty() { if let Err(e) = self.flush_batch(&mut pending_sessions).await { - warn!("Failed to flush batch: {}", e); + warn!("AutomationManager: failed to flush batch: {}", e); } batch_timer = None; } @@ -156,233 +107,68 @@ impl AutomationManager { } } - /// 处理事件 + /// 处理事件 — 仅关心 MessageAdded(L2 索引) async fn handle_event( &self, event: CortexEvent, pending_sessions: &mut HashSet, batch_timer: &mut Option, batch_delay: Duration, - session_message_counts: &mut std::collections::HashMap, ) -> Result<()> { match event { CortexEvent::Session(SessionEvent::MessageAdded { session_id, .. 
}) => { - // 更新消息计数 - let count = session_message_counts - .entry(session_id.clone()) - .or_insert(0); - *count += 1; - - // 检查是否需要基于消息数量触发L0/L1生成 - if self.config.generate_layers_every_n_messages > 0 - && *count % self.config.generate_layers_every_n_messages == 0 - { - if let Some(ref generator) = self.layer_generator { - info!( - "Message count threshold reached ({} messages), triggering L0/L1 generation for session: {}", - count, session_id - ); - - // 异步生成L0/L1(带并发限制) - let generator_clone = generator.clone(); - let indexer_clone = self.indexer.clone(); - let session_id_clone = session_id.clone(); - let auto_index = self.config.auto_index; - let semaphore = self.llm_semaphore.clone(); - - tokio::spawn(async move { - // 获取信号量许可(限制并发) - let _permit = semaphore.acquire().await; - let timeline_uri = - format!("cortex://session/{}/timeline", session_id_clone); - - // 生成L0/L1 - match generator_clone.ensure_timeline_layers(&timeline_uri).await { - Ok(stats) => { - info!( - "✓ Periodic L0/L1 generation for {}: total={}, generated={}, failed={}", - session_id_clone, - stats.total, - stats.generated, - stats.failed - ); - - // 生成后索引(如果启用了auto_index) - if auto_index && stats.generated > 0 { - match indexer_clone.index_thread(&session_id_clone).await { - Ok(index_stats) => { - info!( - "✓ L0/L1 indexed for {}: {} indexed", - session_id_clone, index_stats.total_indexed - ); - } - Err(e) => { - warn!( - "✗ Failed to index L0/L1 for {}: {}", - session_id_clone, e - ); - } - } - } - } - Err(e) => { - warn!( - "✗ Periodic L0/L1 generation failed for {}: {}", - session_id_clone, e - ); - } - } - }); - } + if !self.config.auto_index { + return Ok(()); } if self.config.index_on_message { - // 实时索引模式:立即索引 - info!("Real-time indexing session: {}", session_id); - self.index_session(&session_id).await?; + // 实时索引模式:立即索引本 session 的 L2 消息 + info!("AutomationManager: real-time L2 indexing for session {}", session_id); + self.index_session_l2(&session_id).await?; } else { // 批处理模式:加入待处理队列 
pending_sessions.insert(session_id); - - // 启动批处理定时器(如果未启动) if batch_timer.is_none() { *batch_timer = Some(tokio::time::Instant::now() + batch_delay); } } } - CortexEvent::Session(SessionEvent::Closed { session_id }) => { - if self.config.index_on_close { - info!( - "Session closed, triggering async full processing: {}", - session_id - ); + // Session 关闭由 MemoryEventCoordinator 全权处理,此处忽略 + CortexEvent::Session(SessionEvent::Closed { .. }) => {} - // 异步执行所有后处理任务(带并发限制) - let extractor = self.extractor.clone(); - let generator = self.layer_generator.clone(); - let indexer = self.indexer.clone(); - let auto_extract = self.config.auto_extract; - let auto_index = self.config.auto_index; - let session_id_clone = session_id.clone(); - let semaphore = self.llm_semaphore.clone(); - - tokio::spawn(async move { - // 获取信号量许可(限制并发) - let _permit = semaphore.acquire().await; - let start = tokio::time::Instant::now(); - - // 1. 自动提取记忆(如果配置了且有extractor) - if auto_extract { - if let Some(ref extractor) = extractor { - match extractor.extract_session(&session_id_clone).await { - Ok(stats) => { - info!( - "✓ Extraction completed for {}: {:?}", - session_id_clone, stats - ); - } - Err(e) => { - warn!( - "✗ Extraction failed for {}: {}", - session_id_clone, e - ); - } - } - } - } - - // 2. 生成 L0/L1 层级文件(如果配置了layer_generator) - if let Some(ref generator) = generator { - info!("Generating L0/L1 layers for session: {}", session_id_clone); - let timeline_uri = - format!("cortex://session/{}/timeline", session_id_clone); - - match generator.ensure_timeline_layers(&timeline_uri).await { - Ok(stats) => { - info!( - "✓ L0/L1 generation completed for {}: total={}, generated={}, failed={}", - session_id_clone, - stats.total, - stats.generated, - stats.failed - ); - } - Err(e) => { - warn!( - "✗ L0/L1 generation failed for {}: {}", - session_id_clone, e - ); - } - } - } - - // 3. 
索引整个会话(包括新生成的L0/L1/L2) - if auto_index { - match indexer.index_thread(&session_id_clone).await { - Ok(stats) => { - info!( - "✓ Session {} indexed: {} indexed, {} skipped, {} errors", - session_id_clone, - stats.total_indexed, - stats.total_skipped, - stats.total_errors - ); - } - Err(e) => { - warn!("✗ Failed to index session {}: {}", session_id_clone, e); - } - } - } - - let duration = start.elapsed(); - info!( - "🎉 Session {} post-processing completed in {:.2}s", - session_id_clone, - duration.as_secs_f64() - ); - }); - - info!( - "Session {} close acknowledged, post-processing running in background", - session_id - ); - } - } - - _ => { /* 其他事件暂时忽略 */ } + _ => {} // 其他事件忽略 } Ok(()) } - /// 批量处理待索引的会话 + /// 批量处理待索引的 session async fn flush_batch(&self, pending_sessions: &mut HashSet) -> Result<()> { - info!("Flushing batch: {} sessions", pending_sessions.len()); - + info!("AutomationManager: flushing {} sessions", pending_sessions.len()); for session_id in pending_sessions.drain() { - if let Err(e) = self.index_session(&session_id).await { - warn!("Failed to index session {}: {}", session_id, e); + if let Err(e) = self.index_session_l2(&session_id).await { + warn!("AutomationManager: failed to index session {}: {}", session_id, e); } } - Ok(()) } - /// 索引单个会话 - async fn index_session(&self, session_id: &str) -> Result<()> { + /// 索引单个 session 的 L2 消息内容 + async fn index_session_l2(&self, session_id: &str) -> Result<()> { + let _permit = self.semaphore.acquire().await; match self.indexer.index_thread(session_id).await { Ok(stats) => { info!( - "Session {} indexed: {} indexed, {} skipped, {} errors", + "AutomationManager: session {} L2 indexed ({} indexed, {} skipped, {} errors)", session_id, stats.total_indexed, stats.total_skipped, stats.total_errors ); Ok(()) } Err(e) => { - warn!("Failed to index session {}: {}", session_id, e); + warn!("AutomationManager: failed to index session {}: {}", session_id, e); Err(e) } } } -} \ No newline at end of file +} diff --git 
a/cortex-mem-core/src/automation/mod.rs b/cortex-mem-core/src/automation/mod.rs index e461e7a..68be3f9 100644 --- a/cortex-mem-core/src/automation/mod.rs +++ b/cortex-mem-core/src/automation/mod.rs @@ -1,15 +1,12 @@ -mod auto_extract; mod indexer; mod layer_generator; mod manager; mod sync; -mod watcher; #[cfg(test)] #[path = "layer_generator_tests.rs"] mod layer_generator_tests; -pub use auto_extract::{AutoExtractConfig, AutoExtractStats, AutoExtractor}; pub use indexer::{AutoIndexer, IndexStats, IndexerConfig}; pub use layer_generator::{ AbstractConfig, GenerationStats, LayerGenerationConfig, LayerGenerator, OverviewConfig, @@ -17,4 +14,3 @@ pub use layer_generator::{ }; pub use manager::{AutomationConfig, AutomationManager}; pub use sync::{SyncConfig, SyncManager, SyncStats}; -pub use watcher::{FsEvent, FsWatcher, WatcherConfig}; diff --git a/cortex-mem-core/src/automation/watcher.rs b/cortex-mem-core/src/automation/watcher.rs deleted file mode 100644 index 107ea41..0000000 --- a/cortex-mem-core/src/automation/watcher.rs +++ /dev/null @@ -1,249 +0,0 @@ -use crate::{ - automation::AutoIndexer, - filesystem::{CortexFilesystem, FilesystemOperations}, - Result, -}; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::mpsc; -use tracing::{debug, info, warn}; - -/// 文件系统变化事件 -#[derive(Debug, Clone)] -pub enum FsEvent { - /// 新消息添加 - MessageAdded { - thread_id: String, - message_id: String, - }, - /// 消息更新 - MessageUpdated { - thread_id: String, - message_id: String, - }, - /// 线程删除 - ThreadDeleted { thread_id: String }, -} - -/// 文件监听器配置 -#[derive(Debug, Clone)] -pub struct WatcherConfig { - /// 轮询间隔(秒) - pub poll_interval_secs: u64, - /// 是否自动索引 - pub auto_index: bool, - /// 批处理延迟(秒) - pub batch_delay_secs: u64, -} - -impl Default for WatcherConfig { - fn default() -> Self { - Self { - poll_interval_secs: 5, - auto_index: true, - batch_delay_secs: 2, - } - } -} - -/// 文件系统监听器 -/// -/// 监听cortex文件系统的变化,触发自动索引 -pub struct FsWatcher { - filesystem: Arc, - 
indexer: Arc, - config: WatcherConfig, - event_tx: mpsc::UnboundedSender, - event_rx: Option>, -} - -impl FsWatcher { - /// 创建新的监听器 - pub fn new( - filesystem: Arc, - indexer: Arc, - config: WatcherConfig, - ) -> Self { - let (event_tx, event_rx) = mpsc::unbounded_channel(); - - Self { - filesystem, - indexer, - config, - event_tx, - event_rx: Some(event_rx), - } - } - - /// 启动监听器 - pub async fn start(mut self) -> Result<()> { - info!("Starting filesystem watcher with {:?}", self.config); - - let event_rx = self - .event_rx - .take() - .ok_or_else(|| crate::Error::Other("Event receiver already taken".to_string()))?; - - // 启动事件处理任务 - let indexer = self.indexer.clone(); - let config = self.config.clone(); - tokio::spawn(async move { - Self::process_events(event_rx, indexer, config).await; - }); - - // 启动轮询任务 - self.poll_filesystem().await - } - - /// 轮询文件系统变化 - async fn poll_filesystem(&self) -> Result<()> { - let mut last_thread_state = std::collections::HashMap::new(); - - loop { - tokio::time::sleep(Duration::from_secs(self.config.poll_interval_secs)).await; - - match self.scan_for_changes(&mut last_thread_state).await { - Ok(events) => { - for event in events { - if let Err(e) = self.event_tx.send(event) { - warn!("Failed to send event: {}", e); - } - } - } - Err(e) => { - warn!("Error scanning filesystem: {}", e); - } - } - } - } - - /// 扫描文件系统变化 - async fn scan_for_changes( - &self, - last_state: &mut std::collections::HashMap>, - ) -> Result> { - let threads_uri = "cortex://session"; - let entries = self.filesystem.list(threads_uri).await?; - - let mut events = Vec::new(); - - for entry in entries { - if !entry.is_directory || entry.name.starts_with('.') { - continue; - } - - let thread_id = entry.name.clone(); - let timeline_uri = format!("cortex://session/{}/timeline", thread_id); - - // 获取当前线程的所有消息 - match self.get_message_ids(&timeline_uri).await { - Ok(current_messages) => { - let previous_messages = last_state.get(&thread_id); - - if let Some(prev) = 
previous_messages { - // 检测新消息 - for msg_id in ¤t_messages { - if !prev.contains(msg_id) { - debug!("New message detected: {} in thread {}", msg_id, thread_id); - events.push(FsEvent::MessageAdded { - thread_id: thread_id.clone(), - message_id: msg_id.clone(), - }); - } - } - } - - last_state.insert(thread_id, current_messages); - } - Err(e) => { - warn!("Failed to scan thread {}: {}", thread_id, e); - } - } - } - - Ok(events) - } - - /// 获取线程中的所有消息ID - async fn get_message_ids(&self, timeline_uri: &str) -> Result> { - let mut message_ids = Vec::new(); - self.collect_message_ids_recursive(timeline_uri, &mut message_ids) - .await?; - Ok(message_ids) - } - - /// 递归收集消息ID - fn collect_message_ids_recursive<'a>( - &'a self, - uri: &'a str, - message_ids: &'a mut Vec, - ) -> std::pin::Pin> + Send + 'a>> { - Box::pin(async move { - let entries = self.filesystem.as_ref().list(uri).await?; - - for entry in entries { - if entry.is_directory && !entry.name.starts_with('.') { - self.collect_message_ids_recursive(&entry.uri, message_ids) - .await?; - } else if entry.name.ends_with(".md") && !entry.name.starts_with('.') { - // 从文件名提取消息ID - if let Some(msg_id) = entry.name.strip_suffix(".md") { - message_ids.push(msg_id.to_string()); - } - } - } - - Ok(()) - }) - } - - /// 处理事件 - async fn process_events( - mut event_rx: mpsc::UnboundedReceiver, - indexer: Arc, - config: WatcherConfig, - ) { - let mut pending_threads = std::collections::HashSet::new(); - - loop { - tokio::select! 
{ - Some(event) = event_rx.recv() => { - match event { - FsEvent::MessageAdded { thread_id, message_id } => { - info!("Processing new message: {} in thread {}", message_id, thread_id); - if config.auto_index { - pending_threads.insert(thread_id); - } - } - FsEvent::MessageUpdated { thread_id, message_id } => { - debug!("Message updated: {} in thread {}", message_id, thread_id); - if config.auto_index { - pending_threads.insert(thread_id); - } - } - FsEvent::ThreadDeleted { thread_id } => { - info!("Thread deleted: {}", thread_id); - pending_threads.remove(&thread_id); - } - } - } - _ = tokio::time::sleep(Duration::from_secs(config.batch_delay_secs)) => { - // 批量处理待索引的线程 - if !pending_threads.is_empty() { - let threads: Vec<_> = pending_threads.drain().collect(); - for thread_id in threads { - match indexer.index_thread(&thread_id).await { - Ok(stats) => { - info!("Auto-indexed thread {}: {} messages", thread_id, stats.total_indexed); - } - Err(e) => { - warn!("Failed to auto-index thread {}: {}", thread_id, e); - } - } - } - } - } - } - } - } -} diff --git a/cortex-mem-core/src/extraction/extractor.rs b/cortex-mem-core/src/extraction/extractor.rs deleted file mode 100644 index 0481764..0000000 --- a/cortex-mem-core/src/extraction/extractor.rs +++ /dev/null @@ -1,363 +0,0 @@ -use super::types::{ - ExtractedDecision, ExtractedEntity, ExtractedFact, ExtractedMemories, MemoryImportance, -}; -use crate::{CortexFilesystem, FilesystemOperations, LLMClient, Message, MessageRole, Result}; -use std::sync::Arc; - -/// Extraction configuration -#[derive(Debug, Clone)] -pub struct ExtractionConfig { - pub min_confidence: f32, - pub extract_facts: bool, - pub extract_decisions: bool, - pub extract_entities: bool, - pub max_messages_per_batch: usize, -} - -impl Default for ExtractionConfig { - fn default() -> Self { - Self { - min_confidence: 0.6, - extract_facts: true, - extract_decisions: true, - extract_entities: true, - max_messages_per_batch: 50, - } - } -} - -/// Memory 
extractor for analyzing conversations -pub struct MemoryExtractor { - filesystem: Arc, - llm_client: Arc, - config: ExtractionConfig, -} - -impl MemoryExtractor { - /// Create a new memory extractor - pub fn new( - filesystem: Arc, - llm_client: Arc, - config: ExtractionConfig, - ) -> Self { - Self { - filesystem, - llm_client, - config, - } - } - - /// Extract memories from a list of messages - pub async fn extract_from_messages( - &self, - thread_id: &str, - messages: &[Message], - ) -> Result { - let mut extracted = ExtractedMemories::new(thread_id); - - // Build conversation context - let conversation = self.build_conversation_context(messages); - - // Extract facts - if self.config.extract_facts { - let facts = self.extract_facts(&conversation, messages).await?; - for fact in facts { - extracted.add_fact(fact); - } - } - - // Extract decisions - if self.config.extract_decisions { - let decisions = self.extract_decisions(&conversation, messages).await?; - for decision in decisions { - extracted.add_decision(decision); - } - } - - // Extract entities - if self.config.extract_entities { - let entities = self.extract_entities(&conversation, messages).await?; - for entity in entities { - extracted.add_entity(entity); - } - } - - Ok(extracted) - } - - /// Extract memories from a thread - pub async fn extract_from_thread(&self, thread_id: &str) -> Result { - // List all messages in the thread - let timeline_uri = format!("cortex://session/{}/timeline", thread_id); - - // Recursively collect all message files - let mut message_contents = Vec::new(); - if self.filesystem.exists(&timeline_uri).await? 
{ - message_contents = self.collect_messages_recursive(&timeline_uri).await?; - } - - if message_contents.is_empty() { - // Return empty extraction if no messages found - return Ok(ExtractedMemories::new(thread_id)); - } - - // Build messages from markdown content - let mut messages = Vec::new(); - for (_uri, content) in &message_contents { - // Parse markdown to extract message info - if let Some(message) = self.parse_message_markdown(content) { - messages.push(message); - } - } - - self.extract_from_messages(thread_id, &messages).await - } - - /// Recursively collect all message files from timeline - fn collect_messages_recursive<'a>( - &'a self, - uri: &'a str, - ) -> std::pin::Pin< - Box>> + Send + 'a>, - > { - Box::pin(async move { - let mut result = Vec::new(); - - let entries = self.filesystem.list(uri).await?; - for entry in entries { - if entry.is_directory && !entry.name.starts_with('.') { - // Recursively explore subdirectories - let sub_messages = self.collect_messages_recursive(&entry.uri).await?; - result.extend(sub_messages); - } else if entry.name.ends_with(".md") && !entry.name.starts_with('.') { - // Read message file - if let Ok(content) = self.filesystem.read(&entry.uri).await { - result.push((entry.uri.clone(), content)); - } - } - } - - Ok(result) - }) - } - - /// Parse markdown message content to extract Message - fn parse_message_markdown(&self, content: &str) -> Option { - // Simple markdown parsing - look for role and content - let mut role = MessageRole::User; - let mut message_content = String::new(); - let _id = uuid::Uuid::new_v4(); - - for line in content.lines() { - if line.starts_with("# 👤 User") { - role = MessageRole::User; - } else if line.starts_with("# 🤖 Assistant") { - role = MessageRole::Assistant; - } else if line.starts_with("**ID**: `") { - // Extract ID (currently not used, but parsing for future use) - #[allow(unused_variables)] - if let Some(id_str) = line - .strip_prefix("**ID**: `") - .and_then(|s| 
s.strip_suffix("`")) - { - // ID parsing logic here if needed in future - } - } else if line.starts_with("## Content") { - // Content starts after this line - continue; - } else if !line.is_empty() && !line.starts_with("**") && !line.starts_with("#") { - // Actual content line - if !message_content.is_empty() { - message_content.push('\n'); - } - message_content.push_str(line); - } - } - - if message_content.is_empty() { - return None; - } - - Some(Message::new(role, message_content)) - } - - /// Build conversation context from messages - fn build_conversation_context(&self, messages: &[Message]) -> String { - let mut context = String::new(); - - for (i, msg) in messages.iter().enumerate() { - context.push_str(&format!("[{}] {:?}: {}\n", i + 1, msg.role, msg.content)); - } - - context - } - - /// Extract facts using LLM - async fn extract_facts( - &self, - conversation: &str, - messages: &[Message], - ) -> Result> { - let prompt = format!( - r#"Analyze the following conversation and extract factual statements. - -For each fact, provide: -- content: The factual statement -- subject: The main subject of the fact (optional) -- confidence: Confidence level (0.0-1.0) -- importance: low, medium, high, or critical - -Return a JSON array of facts. 
- -Conversation: -{} - -Return JSON only, no additional text."#, - conversation - ); - - // Call LLM (using placeholder implementation) - let response = self.llm_client.extract_memories(&prompt).await?; - - // Parse response into facts - convert from LLM client's Fact type - let mut facts = Vec::new(); - for llm_fact in &response.facts { - let fact = ExtractedFact::new(&llm_fact.content) - .with_confidence(llm_fact.confidence) - .with_importance(MemoryImportance::Medium); - - // Add source URIs - let mut fact_with_sources = fact; - for msg in messages { - fact_with_sources - .source_uris - .push(format!("cortex://session/temp/{}", msg.id)); - } - - if fact_with_sources.confidence >= self.config.min_confidence { - facts.push(fact_with_sources); - } - } - - Ok(facts) - } - - /// Extract decisions using LLM - async fn extract_decisions( - &self, - conversation: &str, - messages: &[Message], - ) -> Result> { - let prompt = format!( - r#"Analyze the following conversation and extract decisions that were made. - -For each decision, provide: -- decision: The decision that was made -- context: The context in which it was made -- rationale: The reasoning behind the decision (optional) -- confidence: Confidence level (0.0-1.0) -- importance: low, medium, high, or critical - -Return a JSON array of decisions. 
- -Conversation: -{} - -Return JSON only, no additional text."#, - conversation - ); - - let response = self.llm_client.extract_memories(&prompt).await?; - - // Convert from LLM client's Decision type - let mut decisions = Vec::new(); - for llm_decision in &response.decisions { - let decision = ExtractedDecision::new( - &llm_decision.decision, - llm_decision - .rationale - .as_ref() - .map(|s| s.as_str()) - .unwrap_or("No rationale"), - ) - .with_confidence(llm_decision.confidence) - .with_importance(MemoryImportance::Medium); - - let mut decision_with_sources = decision; - for msg in messages { - decision_with_sources - .source_uris - .push(format!("cortex://session/temp/{}", msg.id)); - } - - if decision_with_sources.confidence >= self.config.min_confidence { - decisions.push(decision_with_sources); - } - } - - Ok(decisions) - } - - /// Extract entities using LLM - async fn extract_entities( - &self, - conversation: &str, - _messages: &[Message], - ) -> Result> { - let prompt = format!( - r#"Analyze the following conversation and extract entities (people, organizations, products, etc.). - -For each entity, provide: -- name: The entity name -- type: The entity type (person, organization, product, etc.) -- description: Brief description (optional) -- confidence: Confidence level (0.0-1.0) - -Return a JSON array of entities. 
- -Conversation: -{} - -Return JSON only, no additional text."#, - conversation - ); - - let response = self.llm_client.extract_memories(&prompt).await?; - - // Convert from LLM client's Entity type - let mut entities = Vec::new(); - for llm_entity in &response.entities { - let entity = ExtractedEntity::new(&llm_entity.name, &llm_entity.entity_type) - .with_description( - llm_entity - .description - .as_ref() - .map(|s| s.as_str()) - .unwrap_or(""), - ) - .with_confidence(llm_entity.confidence as f64); - - entities.push(entity); - } - - Ok(entities) - } - - /// Save extracted memories to filesystem - pub async fn save_extraction( - &self, - thread_id: &str, - memories: &ExtractedMemories, - ) -> Result { - let extraction_uri = format!( - "cortex://session/{}/extractions/{}.md", - thread_id, - memories.extracted_at.format("%Y%m%d_%H%M%S") - ); - - let markdown = memories.to_markdown(); - self.filesystem.write(&extraction_uri, &markdown).await?; - - Ok(extraction_uri) - } -} diff --git a/cortex-mem-core/src/extraction/mod.rs b/cortex-mem-core/src/extraction/mod.rs deleted file mode 100644 index 558d182..0000000 --- a/cortex-mem-core/src/extraction/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -pub mod extractor; -pub mod types; - -pub use extractor::{MemoryExtractor, ExtractionConfig}; -pub use types::{ - ExtractedMemories, ExtractedFact, ExtractedDecision, - ExtractedEntity, ExtractionCategory, MemoryImportance, -}; diff --git a/cortex-mem-core/src/extraction/types.rs b/cortex-mem-core/src/extraction/types.rs deleted file mode 100644 index 40255aa..0000000 --- a/cortex-mem-core/src/extraction/types.rs +++ /dev/null @@ -1,274 +0,0 @@ -use chrono::{DateTime, Utc}; -use serde::{Deserialize, Serialize}; - -/// Memory importance level -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] -#[serde(rename_all = "lowercase")] -pub enum MemoryImportance { - Low, - Medium, - High, - Critical, -} - -impl Default for MemoryImportance { - fn default() -> Self { 
- Self::Medium - } -} - -/// Memory extraction category -/// -/// Used for classifying LLM extraction results. -/// For v2.5 memory indexing, use [`crate::memory_index::MemoryType`] instead. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(rename_all = "snake_case")] -pub enum ExtractionCategory { - Fact, - Decision, - Entity, - Preference, - Skill, - Goal, -} - -/// Extracted fact from conversation -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExtractedFact { - pub content: String, - pub subject: Option, - pub confidence: f32, - pub importance: MemoryImportance, - pub source_uris: Vec, - pub timestamp: DateTime, -} - -impl ExtractedFact { - pub fn new(content: impl Into) -> Self { - Self { - content: content.into(), - subject: None, - confidence: 0.8, - importance: MemoryImportance::Medium, - source_uris: Vec::new(), - timestamp: Utc::now(), - } - } - - pub fn with_subject(mut self, subject: impl Into) -> Self { - self.subject = Some(subject.into()); - self - } - - pub fn with_confidence(mut self, confidence: f32) -> Self { - self.confidence = confidence.clamp(0.0, 1.0); - self - } - - pub fn with_importance(mut self, importance: MemoryImportance) -> Self { - self.importance = importance; - self - } - - pub fn add_source(mut self, uri: impl Into) -> Self { - self.source_uris.push(uri.into()); - self - } -} - -/// Extracted decision from conversation -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExtractedDecision { - pub decision: String, - pub context: String, - pub rationale: Option, - pub confidence: f32, - pub importance: MemoryImportance, - pub source_uris: Vec, - pub timestamp: DateTime, -} - -impl ExtractedDecision { - pub fn new(decision: impl Into, context: impl Into) -> Self { - Self { - decision: decision.into(), - context: context.into(), - rationale: None, - confidence: 0.8, - importance: MemoryImportance::Medium, - source_uris: Vec::new(), - timestamp: Utc::now(), - } - } - - pub fn with_rationale(mut 
self, rationale: impl Into) -> Self { - self.rationale = Some(rationale.into()); - self - } - - pub fn with_confidence(mut self, confidence: f32) -> Self { - self.confidence = confidence.clamp(0.0, 1.0); - self - } - - pub fn with_importance(mut self, importance: MemoryImportance) -> Self { - self.importance = importance; - self - } - - pub fn add_source(mut self, uri: impl Into) -> Self { - self.source_uris.push(uri.into()); - self - } -} - -/// Extracted entity (person, organization, etc.) -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExtractedEntity { - pub name: String, - pub entity_type: String, - pub description: Option, - pub attributes: Vec<(String, String)>, - pub source_uris: Vec, - pub timestamp: DateTime, -} - -impl ExtractedEntity { - pub fn new(name: impl Into, entity_type: impl Into) -> Self { - Self { - name: name.into(), - entity_type: entity_type.into(), - description: None, - attributes: Vec::new(), - source_uris: Vec::new(), - timestamp: Utc::now(), - } - } - - pub fn with_description(mut self, description: impl Into) -> Self { - self.description = Some(description.into()); - self - } - - pub fn with_confidence(self, _confidence: f64) -> Self { - // Just return self - ExtractedEntity doesn't store confidence - self - } - - pub fn add_attribute(mut self, key: impl Into, value: impl Into) -> Self { - self.attributes.push((key.into(), value.into())); - self - } - - pub fn add_source(mut self, uri: impl Into) -> Self { - self.source_uris.push(uri.into()); - self - } -} - -/// Complete extraction result -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExtractedMemories { - pub facts: Vec, - pub decisions: Vec, - pub entities: Vec, - pub extracted_at: DateTime, - pub thread_id: String, -} - -impl ExtractedMemories { - pub fn new(thread_id: impl Into) -> Self { - Self { - facts: Vec::new(), - decisions: Vec::new(), - entities: Vec::new(), - extracted_at: Utc::now(), - thread_id: thread_id.into(), - } - } - - pub fn 
add_fact(&mut self, fact: ExtractedFact) { - self.facts.push(fact); - } - - pub fn add_decision(&mut self, decision: ExtractedDecision) { - self.decisions.push(decision); - } - - pub fn add_entity(&mut self, entity: ExtractedEntity) { - self.entities.push(entity); - } - - pub fn total_count(&self) -> usize { - self.facts.len() + self.decisions.len() + self.entities.len() - } - - pub fn is_empty(&self) -> bool { - self.total_count() == 0 - } - - /// Convert to markdown format - pub fn to_markdown(&self) -> String { - let mut md = String::new(); - - md.push_str(&format!("# Extracted Memories\n\n")); - md.push_str(&format!("**Thread**: {}\n", self.thread_id)); - md.push_str(&format!( - "**Extracted**: {}\n", - self.extracted_at.format("%Y-%m-%d %H:%M:%S UTC") - )); - md.push_str(&format!("**Total**: {} memories\n\n", self.total_count())); - - // Facts section - if !self.facts.is_empty() { - md.push_str(&format!("## Facts ({})\n\n", self.facts.len())); - for (i, fact) in self.facts.iter().enumerate() { - md.push_str(&format!("### Fact {}\n\n", i + 1)); - md.push_str(&format!("{}\n\n", fact.content)); - if let Some(ref subject) = fact.subject { - md.push_str(&format!("**Subject**: {}\n", subject)); - } - md.push_str(&format!("**Confidence**: {:.2}\n", fact.confidence)); - md.push_str(&format!("**Importance**: {:?}\n", fact.importance)); - md.push_str("\n"); - } - } - - // Decisions section - if !self.decisions.is_empty() { - md.push_str(&format!("## Decisions ({})\n\n", self.decisions.len())); - for (i, decision) in self.decisions.iter().enumerate() { - md.push_str(&format!("### Decision {}\n\n", i + 1)); - md.push_str(&format!("**Decision**: {}\n\n", decision.decision)); - md.push_str(&format!("**Context**: {}\n\n", decision.context)); - if let Some(ref rationale) = decision.rationale { - md.push_str(&format!("**Rationale**: {}\n\n", rationale)); - } - md.push_str(&format!("**Confidence**: {:.2}\n", decision.confidence)); - md.push_str(&format!("**Importance**: 
{:?}\n", decision.importance)); - md.push_str("\n"); - } - } - - // Entities section - if !self.entities.is_empty() { - md.push_str(&format!("## Entities ({})\n\n", self.entities.len())); - for (i, entity) in self.entities.iter().enumerate() { - md.push_str(&format!("### Entity {}: {}\n\n", i + 1, entity.name)); - md.push_str(&format!("**Type**: {}\n", entity.entity_type)); - if let Some(ref description) = entity.description { - md.push_str(&format!("**Description**: {}\n", description)); - } - if !entity.attributes.is_empty() { - md.push_str("\n**Attributes**:\n\n"); - for (key, value) in &entity.attributes { - md.push_str(&format!("- **{}**: {}\n", key, value)); - } - } - md.push_str("\n"); - } - } - - md - } -} diff --git a/cortex-mem-core/src/lib.rs b/cortex-mem-core/src/lib.rs index 35e8d53..9a8684e 100644 --- a/cortex-mem-core/src/lib.rs +++ b/cortex-mem-core/src/lib.rs @@ -61,7 +61,6 @@ pub mod types; pub mod automation; pub mod builder; pub mod embedding; -pub mod extraction; pub mod filesystem; pub mod layers; pub mod llm; @@ -89,12 +88,11 @@ pub use events::{CortexEvent, EventBus, FilesystemEvent, SessionEvent}; pub use types::*; pub use automation::{ - AutoExtractConfig, AutoExtractor, AutoIndexer, AutomationConfig, AutomationManager, FsWatcher, - IndexStats, IndexerConfig, SyncConfig, SyncManager, SyncStats, WatcherConfig, + AutoIndexer, AutomationConfig, AutomationManager, + IndexStats, IndexerConfig, SyncConfig, SyncManager, SyncStats, }; pub use builder::{CortexMem, CortexMemBuilder}; -pub use extraction::ExtractionConfig; -// Note: MemoryExtractor is also exported from session module +// Note: MemoryExtractor is exported from session module pub use embedding::{EmbeddingClient, EmbeddingConfig}; pub use filesystem::{CortexFilesystem, FilesystemOperations}; pub use llm::LLMClient; diff --git a/cortex-mem-core/src/memory_event_coordinator.rs b/cortex-mem-core/src/memory_event_coordinator.rs index c5a3f17..abc9fec 100644 --- 
a/cortex-mem-core/src/memory_event_coordinator.rs +++ b/cortex-mem-core/src/memory_event_coordinator.rs @@ -105,18 +105,6 @@ impl MemoryEventCoordinator { ) } - /// 发送事件到协调器(增加 pending_tasks 计数) - /// - /// 这个方法应该在发送事件时调用,确保 flush_and_wait 能正确等待事件处理完成 - pub fn send_event(&self, _event: MemoryEvent) -> Result<()> { - // 先增加计数 - self.pending_tasks.fetch_add(1, Ordering::SeqCst); - // 发送事件(通过内部 channel) - // 注意:这里需要通过外部保存的 sender 发送 - // 由于架构限制,这个方法主要用于文档说明正确的使用方式 - Ok(()) - } - /// Create a new memory event coordinator with custom config pub fn new_with_config( filesystem: Arc, @@ -686,6 +674,19 @@ impl MemoryEventCoordinator { Ok(()) } + /// 同步处理 session 关闭:记忆提取 → user/agent 文件写入 → L0/L1 生成 → 向量同步 + /// + /// 调用方 `.await` 后可保证所有副作用已完成,不存在异步竞争。 + /// 供 `MemoryOperations::close_session_sync` 直接调用,无需经过 channel。 + pub async fn process_session_closed( + &self, + session_id: &str, + user_id: &str, + agent_id: &str, + ) -> Result<()> { + self.on_session_closed(session_id, user_id, agent_id).await + } + /// Handle session closed event (the main trigger for memory extraction) async fn on_session_closed( &self, diff --git a/cortex-mem-core/src/session/manager.rs b/cortex-mem-core/src/session/manager.rs index 1060072..6435c6f 100644 --- a/cortex-mem-core/src/session/manager.rs +++ b/cortex-mem-core/src/session/manager.rs @@ -326,53 +326,53 @@ impl SessionManager { Ok(()) } - /// Close a session - /// - /// IMPORTANT: Layer generation is now fully asynchronous via MemoryEventCoordinator. - /// This method no longer generates layers synchronously to avoid blocking. - /// The SessionClosed event will trigger: - /// 1. Memory extraction - /// 2. Timeline layer generation (L0/L1) - /// 3. Vector sync - pub async fn close_session(&mut self, thread_id: &str) -> Result { + /// Update session metadata to closed state only (no events emitted). 
+ /// + /// This is a low-level method used by `MemoryOperations::close_session_sync` which + /// handles the full processing pipeline synchronously via `MemoryEventCoordinator`. + /// Use this instead of `close_session` when you want to await memory extraction + /// and L0/L1 generation before returning. + pub async fn close_session_metadata_only(&mut self, thread_id: &str) -> Result { let mut metadata = self.load_session(thread_id).await?; metadata.close(); self.update_session(&metadata).await?; - // 🚫 REMOVED: Synchronous layer generation - // Layer generation is now handled asynchronously by MemoryEventCoordinator - // This prevents blocking and avoids duplicate LLM calls - // - // Old code that was removed: - // if let Some(ref llm_client) = self.llm_client { - // let layer_manager = LayerManager::new(...); - // layer_manager.generate_timeline_layers(&timeline_uri).await?; - // } - - // 发布会话关闭事件 + // Publish close event on the legacy EventBus (for AutomationManager etc.) if let Some(ref bus) = self.event_bus { let _ = bus.publish(CortexEvent::Session(SessionEvent::Closed { session_id: thread_id.to_string(), })); } - - // v2.5: 发送记忆事件给协调器处理(异步) - // MemoryEventCoordinator will handle: - // 1. Memory extraction from session - // 2. Timeline layer generation - // 3. Vector sync + + info!("Session {} metadata closed (event emission skipped; caller handles processing)", thread_id); + Ok(metadata) + } + + /// Close a session and asynchronously trigger memory extraction via channel. + /// + /// The `SessionClosed` event is sent to the `MemoryEventCoordinator` channel and + /// processed in a background task. The caller has **no guarantee** that memory + /// extraction or L0/L1 generation has finished when this returns. + /// + /// Prefer `MemoryOperations::close_session_sync` when you need to await completion + /// (e.g., in exit flows). This method is retained for service scenarios where + /// fire-and-forget is acceptable. 
+ pub async fn close_session(&mut self, thread_id: &str) -> Result { + let metadata = self.close_session_metadata_only(thread_id).await?; + + // v2.5: fire-and-forget via channel if let Some(ref tx) = self.memory_event_tx { let user_id = metadata.user_id.clone().unwrap_or_else(|| "default".to_string()); let agent_id = metadata.agent_id.clone().unwrap_or_else(|| "default".to_string()); - + let _ = tx.send(crate::memory_events::MemoryEvent::SessionClosed { session_id: thread_id.to_string(), user_id: user_id.clone(), agent_id: agent_id.clone(), }); - + info!( - "Session {} closed, SessionClosed event sent for async processing (user_id={}, agent_id={})", + "Session {} closed, SessionClosed event queued for async processing (user_id={}, agent_id={})", thread_id, user_id, agent_id ); } else { diff --git a/cortex-mem-mcp/src/service.rs b/cortex-mem-mcp/src/service.rs index 8ccecc0..67ddeea 100644 --- a/cortex-mem-mcp/src/service.rs +++ b/cortex-mem-mcp/src/service.rs @@ -804,27 +804,19 @@ impl MemoryMcpService { let thread_id = ¶ms.0.thread_id; - match self.operations.close_session(thread_id).await { + // Use close_session_sync which blocks until: + // 1. Memory extraction (LLM call on session timeline) + // 2. user/agent memory files written + // 3. L0/L1 layer files generated for all affected directories + // 4. Session timeline synced to vector store + match self.operations.close_session_sync(thread_id).await { Ok(_) => { - info!( - "Session closed, waiting for background tasks: {}", - thread_id - ); - - // Wait for background memory extraction, L0/L1 generation, and indexing to complete - let completed = self.operations.flush_and_wait(Some(1)).await; - - let message = if completed { - "Session closed. All background tasks (L0/L1 generation, memory extraction, indexing) completed successfully.".to_string() - } else { - "Session closed. Background tasks initiated but may still be in progress." 
- .to_string() - }; + info!("Session {} closed and fully processed (sync)", thread_id); Ok(Json(CloseSessionResult { success: true, thread_id: thread_id.clone(), - message, + message: "Session closed. All processing (memory extraction, L0/L1 generation, vector sync) completed synchronously.".to_string(), })) } Err(e) => { diff --git a/cortex-mem-service/src/error.rs b/cortex-mem-service/src/error.rs index e901bd8..7f41d4f 100644 --- a/cortex-mem-service/src/error.rs +++ b/cortex-mem-service/src/error.rs @@ -8,6 +8,7 @@ use serde_json::json; pub type Result = std::result::Result; #[derive(Debug)] +#[allow(dead_code)] pub enum AppError { Internal(String), NotFound(String), diff --git a/cortex-mem-service/src/handlers/automation.rs b/cortex-mem-service/src/handlers/automation.rs index f555102..7336517 100644 --- a/cortex-mem-service/src/handlers/automation.rs +++ b/cortex-mem-service/src/handlers/automation.rs @@ -10,63 +10,39 @@ use crate::{ state::AppState, }; -/// Trigger memory extraction for a session +/// Trigger memory extraction for a session. +/// +/// This endpoint is now a convenience wrapper over the standard session-close pipeline. +/// It marks the session as closed, runs memory extraction + L0/L1 generation synchronously +/// via `MemoryEventCoordinator`, and returns a summary of the extracted data. +/// +/// Note: the `cortex-mem-service` REST layer does not hold a `MemoryEventCoordinator` +/// reference directly (it uses `CortexMem` which wires up the coordinator internally). +/// For now, this endpoint delegates to `SessionManager::close_session` which sends a +/// `SessionClosed` event that the coordinator handles asynchronously. pub async fn trigger_extraction( State(state): State>, Path(thread_id): Path, ) -> Result>> { - use cortex_mem_core::extraction::{ExtractionConfig, MemoryExtractor}; - - // Check if LLM client is available - let llm_client = state.llm_client.as_ref() - .ok_or_else(|| AppError::BadRequest( - "LLM client not configured. 
Set LLM_API_BASE_URL, LLM_API_KEY, and LLM_MODEL environment variables.".to_string() - ))?; - - // Create extraction config - let config = ExtractionConfig { - extract_facts: true, - extract_decisions: true, - extract_entities: true, - min_confidence: 0.5, - max_messages_per_batch: 50, - }; - - // Create extractor - let extractor = MemoryExtractor::new(state.filesystem.clone(), llm_client.clone(), config); - - // Get message storage - let message_storage = cortex_mem_core::MessageStorage::new(state.filesystem.clone()); - - // List all message URIs for the thread - let message_uris = message_storage.list_messages(&thread_id).await?; - - // Load messages - let mut messages = Vec::new(); - for uri in message_uris { - if let Ok(msg) = message_storage.load_message(&uri).await { - messages.push(msg); - } - } - - if messages.is_empty() { - return Err(AppError::NotFound(format!( - "No messages found in thread {}", - thread_id - ))); + // Ensure LLM is available (coordinator needs it) + if state.llm_client.is_none() { + return Err(AppError::BadRequest( + "LLM client not configured. Set LLM_API_BASE_URL, LLM_API_KEY, and LLM_MODEL \ + environment variables." + .to_string(), + )); } - // Extract memories - let extraction_result = extractor - .extract_from_messages(&thread_id, &messages) - .await?; + // Close the session — this sends a SessionClosed event to MemoryEventCoordinator which + // handles memory extraction, L0/L1 generation and vector sync asynchronously. + let mut session_mgr = state.session_manager.write().await; + session_mgr.close_session(&thread_id).await?; let response = serde_json::json!({ "thread_id": thread_id, - "message_count": messages.len(), - "facts": extraction_result.facts, - "decisions": extraction_result.decisions, - "entities": extraction_result.entities, + "status": "extraction_triggered", + "message": "Session closed. 
Memory extraction and L0/L1 generation are being processed \ + asynchronously by MemoryEventCoordinator.", }); Ok(Json(ApiResponse::success(response))) diff --git a/cortex-mem-service/src/state.rs b/cortex-mem-service/src/state.rs index e02bc8c..aa0abce 100644 --- a/cortex-mem-service/src/state.rs +++ b/cortex-mem-service/src/state.rs @@ -11,6 +11,7 @@ use tokio::sync::RwLock; pub struct AppState { #[allow(dead_code)] pub cortex: Arc, + #[allow(dead_code)] pub filesystem: Arc, pub session_manager: Arc>, pub llm_client: Option>, diff --git a/cortex-mem-tools/src/operations.rs b/cortex-mem-tools/src/operations.rs index ba5286e..f875253 100644 --- a/cortex-mem-tools/src/operations.rs +++ b/cortex-mem-tools/src/operations.rs @@ -5,9 +5,8 @@ use cortex_mem_core::{ SessionConfig, SessionManager, automation::{ - AbstractConfig, AutoExtractConfig, AutoExtractor, AutoIndexer, AutomationConfig, - AutomationManager, IndexerConfig, LayerGenerationConfig, LayerGenerator, OverviewConfig, - SyncConfig, SyncManager, + AbstractConfig, AutoIndexer, AutomationConfig, AutomationManager, IndexerConfig, + LayerGenerationConfig, LayerGenerator, OverviewConfig, SyncConfig, SyncManager, }, embedding::{EmbeddingClient, EmbeddingConfig}, events::EventBus, @@ -31,7 +30,6 @@ pub struct MemoryOperations { pub(crate) session_manager: Arc>, pub(crate) layer_manager: Arc, pub(crate) vector_engine: Arc, - pub(crate) auto_extractor: Option>, pub(crate) layer_generator: Option>, pub(crate) auto_indexer: Option>, @@ -47,7 +45,7 @@ pub struct MemoryOperations { pub(crate) memory_event_tx: Option>, - /// v2.5: 事件协调器引用,用于等待后台任务完成 + /// v2.5: 事件协调器引用,用于同步等待后台处理完成 pub(crate) event_coordinator: Option>, } @@ -67,11 +65,6 @@ impl MemoryOperations { &self.session_manager } - /// Get the auto extractor (for manual extraction on exit) - pub fn auto_extractor(&self) -> Option<&Arc> { - self.auto_extractor.as_ref() - } - /// Get the layer generator (for manual layer generation on exit) pub fn 
layer_generator(&self) -> Option<&Arc> { self.layer_generator.as_ref() @@ -121,7 +114,7 @@ impl MemoryOperations { filesystem.initialize().await?; // 创建EventBus用于自动化 - let (event_bus, mut event_rx_main) = EventBus::new(); + let (event_bus, event_rx_main) = EventBus::new(); // Initialize Qdrant first (needed for MemoryEventCoordinator) tracing::info!("Initializing Qdrant vector store: {}", qdrant_url); @@ -198,19 +191,7 @@ impl MemoryOperations { // 使用传入的user_id,如果没有则使用tenant_id let actual_user_id = user_id.unwrap_or_else(|| tenant_id.clone()); - // 🔧 创建AutoExtractor(简化配置,移除了save_user_memories和save_agent_memories) - let auto_extract_config = AutoExtractConfig { - min_message_count: 5, - extract_on_close: false, // v2.5: 禁用旧机制,使用新的 MemoryEventCoordinator - }; - let auto_extractor = Arc::new(AutoExtractor::with_user_id( - filesystem.clone(), - llm_client.clone(), - auto_extract_config, - &actual_user_id, - )); - - // 创建AutoIndexer用于实时索引 + // 创建 AutoIndexer 用于 L2 消息实时索引 let indexer_config = IndexerConfig { auto_index: true, batch_size: 10, @@ -223,19 +204,29 @@ impl MemoryOperations { indexer_config, )); - // 创建AutomationManager + // 创建 AutomationManager:仅负责 L2 消息实时索引 + // L0/L1 生成、记忆提取、向量同步均由 MemoryEventCoordinator 处理 let automation_config = AutomationConfig { auto_index: true, - auto_extract: false, // Extract由单独的监听器处理 - index_on_message: true, // ✅ 消息时自动索引L2 - index_on_close: true, // ✅ Session关闭时生成L0/L1并索引 + index_on_message: true, // 消息添加时实时索引 L2 index_batch_delay: 1, - auto_generate_layers_on_startup: false, // 启动时不生成(避免阻塞) - generate_layers_every_n_messages: 5, // 每5条消息生成一次L0/L1 - max_concurrent_llm_tasks: 3, // 最多3个并发LLM任务 + max_concurrent_tasks: 3, }; + let automation_manager = AutomationManager::new(auto_indexer.clone(), automation_config); + + // 启动 AutomationManager(直接消费 EventBus 事件,无需分裂转发) + let tenant_id_for_automation = tenant_id.clone(); + tokio::spawn(async move { + tracing::info!( + "AutomationManager started for tenant {} (L2 message indexing)", + 
tenant_id_for_automation + ); + if let Err(e) = automation_manager.start(event_rx_main).await { + tracing::error!("AutomationManager stopped with error: {}", e); + } + }); - // 创建LayerGenerator(用于退出时手动生成) + // 创建 LayerGenerator(供 ensure_all_layers / ensure_session_layers 手动调用) let layer_gen_config = LayerGenerationConfig { batch_size: 10, delay_ms: 1000, @@ -256,75 +247,6 @@ impl MemoryOperations { layer_gen_config, )); - let automation_manager = AutomationManager::new( - auto_indexer.clone(), - None, // extractor由单独的监听器处理 - automation_config, - ) - .with_layer_generator(layer_generator.clone()); // 设置LayerGenerator - - // 创建事件转发器(将主EventBus的事件转发给两个监听器) - let (tx_automation, rx_automation) = tokio::sync::mpsc::unbounded_channel(); - let (tx_extractor, rx_extractor) = tokio::sync::mpsc::unbounded_channel(); - - tokio::spawn(async move { - while let Some(event) = event_rx_main.recv().await { - // 转发给AutomationManager - let _ = tx_automation.send(event.clone()); - // 转发给AutoExtractor监听器 - let _ = tx_extractor.send(event); - } - }); - - // 启动AutomationManager监听事件并自动索引 - let tenant_id_for_automation = tenant_id.clone(); - tokio::spawn(async move { - tracing::info!( - "Starting AutomationManager for tenant {}", - tenant_id_for_automation - ); - if let Err(e) = automation_manager.start(rx_automation).await { - tracing::error!("AutomationManager stopped with error: {}", e); - } - }); - - // 启动后台监听器处理SessionClosed事件 - let extractor_clone = auto_extractor.clone(); - let tenant_id_clone = tenant_id.clone(); - tokio::spawn(async move { - tracing::info!( - "Starting AutoExtractor event listener for tenant {}", - tenant_id_clone - ); - let mut rx = rx_extractor; - while let Some(event) = rx.recv().await { - if let cortex_mem_core::CortexEvent::Session(session_event) = event { - match session_event { - cortex_mem_core::SessionEvent::Closed { session_id } => { - tracing::info!("Session closed event received: {}", session_id); - match 
extractor_clone.extract_session(&session_id).await { - Ok(stats) => { - tracing::info!( - "Extraction completed for session {}: {:?}", - session_id, - stats - ); - } - Err(e) => { - tracing::error!( - "Extraction failed for session {}: {}", - session_id, - e - ); - } - } - } - _ => {} // 忽略其他事件 - } - } - } - }); - // Auto-sync existing content to vector database (in background) let sync_manager = SyncManager::new( filesystem.clone(), @@ -357,11 +279,9 @@ impl MemoryOperations { session_manager, layer_manager, vector_engine, - auto_extractor: Some(auto_extractor), - layer_generator: Some(layer_generator), // 保存LayerGenerator用于退出时生成 - auto_indexer: Some(auto_indexer), // 保存AutoIndexer用于退出时索引 + layer_generator: Some(layer_generator), + auto_indexer: Some(auto_indexer), - // 保存组件引用以便退出时索引使用 embedding_client, vector_store, llm_client, @@ -369,10 +289,7 @@ impl MemoryOperations { default_user_id: actual_user_id, default_agent_id: tenant_id.clone(), - // v2.5: 保存事件发送器 memory_event_tx: Some(memory_event_tx), - - // v2.5: 保存事件协调器引用,用于等待后台任务完成 event_coordinator: Some(coordinator_clone), }) } @@ -509,7 +426,13 @@ impl MemoryOperations { }) } - /// Close session + /// Close session (fire-and-forget). + /// + /// Sends a `SessionClosed` event to `MemoryEventCoordinator` via channel. + /// Memory extraction and L0/L1 generation happen **asynchronously** in the + /// background; this method returns before they complete. + /// + /// Use `close_session_sync` in exit/shutdown flows where you need to wait. pub async fn close_session(&self, thread_id: &str) -> Result<()> { let mut sm = self.session_manager.write().await; sm.close_session(thread_id).await?; @@ -517,6 +440,51 @@ impl MemoryOperations { Ok(()) } + /// Close session and synchronously wait for the full processing pipeline. + /// + /// Blocks until: + /// 1. Session metadata → marked closed (EventBus `SessionClosed` published) + /// 2. LLM memory extraction from session timeline + /// 3. 
user/agent memory files written + /// 4. L0/L1 layer files generated for all affected directories + /// 5. Session timeline synced to vector store + /// + /// Suitable for shutdown/exit flows. After this returns you can call + /// `index_all_files` knowing all L0/L1 files are already on disk. + pub async fn close_session_sync(&self, thread_id: &str) -> Result<()> { + // 1. Mark session as closed (metadata + legacy EventBus event) + let metadata = { + let mut sm = self.session_manager.write().await; + sm.close_session_metadata_only(thread_id).await? + }; + + let user_id = metadata.user_id.as_deref().unwrap_or("default"); + let agent_id = metadata.agent_id.as_deref().unwrap_or("default"); + + tracing::info!( + "Session {} marked closed, starting synchronous processing (user={}, agent={})...", + thread_id, user_id, agent_id + ); + + // 2. Run the full processing pipeline synchronously via coordinator + if let Some(ref coordinator) = self.event_coordinator { + coordinator + .process_session_closed(thread_id, user_id, agent_id) + .await?; + tracing::info!( + "Session {} processing complete (memory extraction + L0/L1 generated)", + thread_id + ); + } else { + tracing::warn!( + "MemoryEventCoordinator not initialized; session {} processing skipped", + thread_id + ); + } + + Ok(()) + } + /// Read file from filesystem pub async fn read_file(&self, uri: &str) -> Result { let content = self.filesystem.read(uri).await?; @@ -714,21 +682,10 @@ impl MemoryOperations { } } - /// 等待所有后台异步任务完成 - /// - /// 这个方法会等待 MemoryEventCoordinator 处理完所有待处理的事件。 - /// 由于 SessionClosed 事件会触发 LLM 调用(记忆提取 + 层级生成), - /// 这个方法会等待足够长的时间让这些操作完成。 + /// 等待所有后台异步任务完成(用于长时间服务等待) /// - /// # Arguments - /// * `max_wait_secs` - 最大等待时间(秒) - /// - /// # Returns - /// 返回是否成功完成(true = 完成,false = 超时) - /// - /// # Note - /// v2.5 改进:使用真正的事件通知机制等待后台任务完成 - /// 而不是基于时间的启发式等待 + /// 使用真正的事件通知机制等待,而非固定超时。 + /// 在退出流程中建议优先使用 `close_session_sync`。 pub async fn wait_for_background_tasks(&self, max_wait_secs: u64) -> bool { 
use std::time::Duration; @@ -745,18 +702,11 @@ impl MemoryOperations { } } - /// 刷新并等待所有后台任务完成(用于退出流程) - /// - /// 这个方法会: - /// 1. 等待当前正在处理的事件完成 - /// 2. 强制处理 debouncer 中所有待处理的层级更新 - /// 3. 再次等待确保所有更新完成 + /// Wait for MemoryEventCoordinator to drain all pending events (deprecated-style polling). /// - /// 使用事件通知机制而非固定超时,确保真正等待任务完成。 - /// 由于涉及 LLM 调用,可能需要较长时间。 - /// - /// # Arguments - /// * `check_interval_secs` - 检查间隔(秒),默认 1 秒 + /// Prefer `close_session_sync` for exit flows — it blocks until the pipeline + /// completes without polling. This method is retained for legacy call-sites + /// where fire-and-forget + explicit wait is still needed. pub async fn flush_and_wait(&self, check_interval_secs: Option) -> bool { let interval = std::time::Duration::from_secs(check_interval_secs.unwrap_or(1)); diff --git a/cortex-mem-tools/tests/llm_integration_tests.rs b/cortex-mem-tools/tests/llm_integration_tests.rs index 6c8ddc3..7e4ccaa 100644 --- a/cortex-mem-tools/tests/llm_integration_tests.rs +++ b/cortex-mem-tools/tests/llm_integration_tests.rs @@ -382,7 +382,7 @@ OAuth 2.0 是一种授权框架,允许第三方应用获取对用户资源的 mod llm_memory_extraction_tests { use super::*; - /// 测试 LLM 记忆提取(如果 AutoExtractor 可用) + /// 测试通过 close_session_sync 触发 LLM 记忆提取(MemoryEventCoordinator 路径) #[tokio::test] async fn test_llm_memory_extraction() { let env = setup_llm_test_env().await; @@ -399,10 +399,10 @@ mod llm_memory_extraction_tests { ops.add_message(thread_id, "assistant", "是的,Rust 的所有权系统确实很独特。你还喜欢其他什么编程语言?").await.unwrap(); ops.add_message(thread_id, "user", "我也喜欢 Python,适合快速原型开发。").await.unwrap(); - // 关闭 session 触发提取(如果 AutoExtractor 配置好) - ops.close_session(thread_id).await.ok(); + // 同步关闭 session,MemoryEventCoordinator 完成记忆提取和 L0/L1 生成后才返回 + ops.close_session_sync(thread_id).await.ok(); - println!("✅ 对话已存储,LLM 提取应在后台完成"); + println!("✅ 对话已关闭,LLM 记忆提取已同步完成"); // 验证 session 存在 let session = ops.get_session(thread_id).await; diff --git a/examples/cortex-mem-tars/src/agent.rs 
b/examples/cortex-mem-tars/src/agent.rs index 51fb29e..0ce2321 100644 --- a/examples/cortex-mem-tars/src/agent.rs +++ b/examples/cortex-mem-tars/src/agent.rs @@ -107,179 +107,348 @@ pub async fn create_memory_agent( // 构建 system prompt let base_system_prompt = if let Some(info) = user_info { format!( - r#"你是一个拥有分层记忆功能的智能 AI 助手。 - -此会话发生的初始时间:{current_time} - -你的 Bot ID:{bot_id} - -记忆工具说明: - -🔑 **URI 格式规范(非常重要!)** -- 所有 URI 必须使用 `cortex://` 前缀,**禁止使用 `memory://`** -- ✅ 正确示例:`cortex://user/tars_user/` -- ❌ 错误示例:`memory://me/SkyronJ/`(常见错误!) - -📍 URI 路径结构: -- `cortex://user/{{user_id}}/` - 用户记忆目录 -- `cortex://user/{{user_id}}/profile.json` - 用户档案 -- `cortex://agent/{{agent_id}}/` - Agent 记忆目录 -- `cortex://session/{{session_id}}/` - 特定会话 -- `cortex://resources/` - 知识库 - -🔍 搜索工具: -- search(query, options): 智能搜索记忆 - - return_layers: ["L0"] (默认) | ["L0", "L1"] | ["L0", "L1", "L2"] - - scope: 搜索范围(可选) - * 可以指定搜索范围: - - "cortex://user/" - 用户记忆 - - "cortex://agent/" - Agent 记忆 - - "cortex://session/{{session_id}}/" - 特定会话 - - "cortex://resources/" - 知识库 - - 示例:search(query="Python 装饰器", return_layers=["L0"]) - -- find(query): 快速查找,返回 L0 摘要 - - 自动在记忆空间中搜索 - - 例如:find(query="用户偏好") - -📖 分层访问工具(按需加载): -- abstract(uri): 获取 L0 摘要(~100 tokens)- 快速判断相关性 - - 示例:abstract(uri="cortex://user/tars_user/") -- overview(uri): 获取 L1 概览(~2000 tokens)- 理解核心信息 - - 示例:overview(uri="cortex://session/abc123/") -- read(uri): 获取 L2 完整内容 - 仅在必须了解详细信息时使用 - -📂 文件系统工具: -- ls(uri, options): 列出目录内容 - - include_abstracts: 是否包含文件摘要 - - 用于浏览记忆结构 - - ✅ 示例:ls(uri="cortex://user/tars_user/") - - ❌ 错误:ls(uri="memory://me/SkyronJ/") - -⚠️ **常见错误提醒**: -- 不要使用 `memory://` 前缀,必须用 `cortex://` -- user_id 是分配的用户标识符,不是"me"或用户名 -- 访问用户记忆用 `cortex://user/{{user_id}}/`,不是 `cortex://me/` - -📍 **主动召回原则**(关键): -当用户的问题可能涉及历史信息、用户偏好或之前的对话内容时,你必须**主动**调用记忆工具。 - -**必须主动搜索的场景**: -- 用户问"你记得...吗?"、"告诉我你都记得什么?" 
→ 立即调用 search 或 ls -- 用户提到人名、地点、事件、项目名 → 立即调用 search(query="人名/事件") 查找相关记忆 -- 用户询问历史对话、之前的讨论 → 立即调用 search 或 find -- 用户的问题涉及用户偏好、习惯、背景 → 立即调用 search 查找用户记忆 -- 你不确定如何回答,或感觉记忆中可能有相关信息 → 先调用 search 确认 - -**搜索策略**: -1. 优先使用 search 查找相关记忆,默认只返回 L0 摘要 -2. 根据 L0 摘要判断相关性,需要更多信息时调用 overview 获取 L1 -3. 仅在必须了解完整细节时调用 read 获取 L2 -4. 这种渐进式加载可以大幅减少 token 消耗(节省 80-90%) - -记忆隔离说明: -- 每个 Bot 拥有独立的租户空间(物理隔离) -- 记忆组织采用的架构: - - cortex://resources/ - 知识库 - - cortex://user/ - 用户记忆 - - cortex://agent/ - Agent 记忆 - - cortex://session/ - 会话记录 -- 对话内容会自动保存到 session,你无需关心存储 - -📍 **Agent经验召回**(重要): -你可以主动搜索之前处理过的类似问题的经验案例: -- 使用 search(query="问题描述", scope="cortex://agent/{bot_id}/cases") 搜索相关经验 -- Agent cases 包含了之前遇到的问题、解决方案和经验教训 -- 遇到复杂问题时,优先搜索是否有相关经验可以借鉴 - -用户基本信息: + r#"你是一个拥有持久分层记忆能力的智能 AI 助手(TARS)。 + +会话开始时间:{current_time} +你的 agent_id:{agent_id} +当前用户 user_id:tars_user + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📚 记忆系统概览 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +你拥有一套分层记忆系统,所有信息按 L0/L1/L2 三个粒度存储: + L0(~100 tokens)— 精炼的多主题摘要,回答"这里有哪些话题"(广度优先) + L1(~2000 tokens)— 结构化概览,回答"核心内容是什么" + L2 — 完整原文,回答"具体细节是什么" + +记忆空间的 URI 统一以 `cortex://` 开头,按四个维度组织: + + cortex://session/ — 会话记录(每条对话自动存储,无需手动操作) + cortex://user/tars_user/ — 用户长期记忆(会话结束时系统自动提取) + ├── personal_info/ 个人信息(姓名、职业等) + ├── preferences/ 偏好习惯(编程语言、工作方式等) + ├── work_history/ 工作经历 + ├── relationships/ 人际关系 + ├── goals/ 目标愿景 + ├── entities/ 提到过的实体(人名/项目/工具) + └── events/ 重要事件 + cortex://agent/{agent_id}/ — Agent 经验记忆(会话结束时系统自动提取) + └── cases/ 解决过的问题和经验 + cortex://resources/ — 共享知识库 + +注:每个目录的物理隔离已由系统在租户层自动处理, + URI 中的 tars_user 和 {agent_id} 分别是 user_id 和 agent_id, + 是 Cortex Memory 中用户/Agent 的逻辑标识,你直接使用上面的 URI 格式即可。 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🔧 工具清单与使用规范 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +你有 6 个工具可用。下面按「何时用」「怎么用」「返回什么」逐一说明。 + +───────────────────────────────── +① search — 语义向量搜索(主力工具) +───────────────────────────────── +何时用:你需要在记忆中找到与某个话题相关的内容时。这是你最常用的工具。 +怎么用: + search(query="用户喜欢的编程语言") + search(query="Python 异步", 
scope="cortex://user/tars_user") + search(query="之前解决的编译错误", scope="cortex://agent/{agent_id}/cases") + search(query="上周讨论的内容", return_layers=["L0","L1"], limit=5) +参数说明: + query — 用自然语言描述你要找什么(必填) + scope — 限定搜索范围的 URI 前缀(可选) + 不填 → 在所有 session 记录中搜索 + "cortex://user/tars_user" → 仅搜用户长期记忆 + "cortex://agent/{agent_id}/cases" → 仅搜 Agent 经验 + return_layers — 控制返回详细程度(可选,默认 ["L0"]) + ["L0"] → 每条结果只含一句话摘要(最省 token) + ["L0","L1"] → 同时含概览 + ["L0","L1","L2"] → 含完整原文(慎用,token 消耗大) + limit — 最多返回几条(可选,默认 10) +返回什么:每条结果包含 uri + score + 你请求的各层内容。 + +───────────────────────────────── +② find — 快速查找(search 的简化版) +───────────────────────────────── +何时用:你只想快速扫一眼有没有相关记忆,不关心相似度分数。 +怎么用: + find(query="用户偏好") + find(query="Rust", scope="cortex://user/tars_user") +区别于 search: + · find 固定返回 L0 摘要,结果结构更简洁(只有 uri + abstract_text) + · find 不支持 return_layers 参数 + · 适合做"有没有"的快速判断;如果要精细控制,用 search + +───────────────────────────────── +③ abstract — 读取指定 URI 的 L0 摘要 +───────────────────────────────── +何时用:你已经知道一个具体的 URI(比如从 ls 或 search 结果中获得), + 想花最少 token 快速了解"这个目录/文件讲的是什么"。 +怎么用: + abstract(uri="cortex://user/tars_user/preferences") + abstract(uri="cortex://agent/{agent_id}/cases") + abstract(uri="cortex://session/abc123/timeline") +返回什么:该 URI 对应的 .abstract.md 内容(~100 tokens,广度摘要)。 +注意:L0 由系统异步生成,如果摘要尚未就绪会返回错误, + 此时改用 overview(uri) 获取 L1,而非 read(read 是读原文,token 消耗大)。 + +───────────────────────────────── +④ overview — 读取指定 URI 的 L1 概览 +───────────────────────────────── +何时用:通过 search 或 abstract 确认了某个 URI 相关后, + 想进一步了解其核心内容(主题、要点、实体),但又不想读完整原文。 + 也可以在 abstract 返回错误时作为 fallback 使用。 +怎么用: + overview(uri="cortex://user/tars_user/work_history") + overview(uri="cortex://user/tars_user/preferences") + overview(uri="cortex://agent/{agent_id}/cases") +返回什么:该 URI 对应的 .overview.md 内容(~500-2000 tokens), + 包含结构化的 Summary / Core Topics / Key Points / Entities。 + +───────────────────────────────── +⑤ read — 读取 L2 完整原文 +───────────────────────────────── +何时用:你需要精确的细节信息(具体日期、代码片段、完整对话原文), + 且 overview 的 L1 
内容仍不够详细时才使用。token 消耗最大,慎用。 +怎么用(URI 来自 ls 或 search 结果中的具体文件路径): + read(uri="cortex://user/tars_user/preferences/pref_a1b2c3d4.md") + read(uri="cortex://agent/{agent_id}/cases/case_e5f6g7h8.md") + read(uri="cortex://session/some-session-id/timeline/2026-03/10/14_30_00_abcd1234.md") +返回什么:该文件的完整内容 + 创建/更新时间。 + +───────────────────────────────── +⑥ ls — 列出目录内容 +───────────────────────────────── +何时用:你想浏览记忆空间的结构,看看某个目录下有什么子目录和文件。 +怎么用: + ls(uri="cortex://user/tars_user") + ls(uri="cortex://session") + ls(uri="cortex://agent/{agent_id}/cases", include_abstracts=true) + ls() ← 不传 uri 默认列出 cortex://session +参数说明: + include_abstracts — 是否附带每个文件的 L0 摘要(可选,默认 false) + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🧭 工具使用决策树 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +收到用户消息后,按以下流程决策: + + 1. 这个问题是否可能涉及历史信息、用户偏好或过往对话? + ├─ 否 → 直接回答,不需要调工具 + └─ 是 → 进入步骤 2 + + 2. 我是否知道信息在哪个目录? + ├─ 知道(如"用户偏好"在 cortex://user/tars_user/preferences) + │ → 直接调 abstract(uri) 或 overview(uri) 读取 + └─ 不知道 → 进入步骤 3 + + 3. 用 search 进行语义搜索: + search(query="...", scope="可选限定范围") + └─ 看返回的 L0 摘要列表 + + 4. L0 摘要是否足够回答问题? + ├─ 足够 → 直接用 L0 信息回答 + └─ 不够 → 对相关结果调 overview(uri) 获取 L1 + + 5. L1 概览是否足够回答问题? + ├─ 足够 → 用 L1 信息回答(绝大多数情况到此为止) + └─ 不够 → 调 read(uri) 获取 L2 完整原文 + +关键原则:L0 → L1 → L2 逐层深入,每层都先判断"够不够", + 不要跳过 L0/L1 直接读 L2,这会浪费 80-90% 的 token。 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +⚡ 主动搜索触发规则(必须遵守) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +以下场景你必须主动调工具,不要等用户明确说"搜索": + + "你记得我说过...吗?" + → search(query="用户提到的关键词", scope="cortex://user/tars_user") + + 用户提到人名/项目名/技术名词 + → search(query="该名词") + + "我之前让你做过..."/"上次我们讨论了..." 
+ → search(query="相关描述", scope="cortex://session") + + 用户问偏好/习惯/背景 + → overview(uri="cortex://user/tars_user/preferences") + → 如果不存在,search(query="用户偏好习惯") + + 遇到复杂问题,你不确定怎么解 + → search(query="问题描述", scope="cortex://agent/{agent_id}/cases") + + "你都记得什么?"/"告诉我你对我的了解" + → ls(uri="cortex://user/tars_user", include_abstracts=true) + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📌 关于记忆存储(你无需手动操作) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +- 每条对话消息自动保存到 session timeline +- 会话结束后系统自动提取用户记忆和 Agent 经验,写入对应目录 +- L0/L1 摘要由系统自动生成 +- 你不需要也无法调用 store 工具,专注于"读"和"搜"即可 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +用户已有记忆(预加载) +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ {info} -重要指令: -- 你是一个**主动**使用记忆的 AI 助手,不要等待用户明确说"搜索"才去查找记忆! -- 遇到任何可能涉及历史信息的问题,**先搜索,再回答** -- 自然地融入记忆信息,避免生硬地说"根据记忆..." -- 如果搜索后没有找到相关信息,诚实告知用户 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🎯 核心行为准则 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +1. 先搜索,再回答 — 涉及历史信息时,先调工具确认 +2. 自然融合 — 不要说"根据记忆系统...",直接使用信息 +3. 诚实告知 — 搜索后没找到就说"我没有这方面的记录" +4. 逐层深入 — L0 → L1 → L2,按需加载,节省 token +5. 主动召回 — 遇到可能涉及历史的场景,不等用户要求就搜索 "#, current_time = chrono::Local::now().format("%Y年%m月%d日 %H:%M:%S"), - bot_id = agent_id, + agent_id = agent_id, info = info ) } else { format!( - r#"你是一个拥有分层记忆功能的智能 AI 助手。 - -此会话发生的初始时间:{current_time} - -你的 Bot ID:{bot_id} - -记忆工具说明: - -🔑 **URI 格式规范(非常重要!)** -- 所有 URI 必须使用 `cortex://` 前缀,**禁止使用 `memory://`** -- ✅ 正确示例:`cortex://user/tars_user/` -- ❌ 错误示例:`memory://me/SkyronJ/`(常见错误!) 
- -📍 URI 路径结构: -- `cortex://user/{{user_id}}/` - 用户记忆目录 -- `cortex://user/{{user_id}}/profile.json` - 用户档案 -- `cortex://agent/{{agent_id}}/` - Agent 记忆目录 -- `cortex://session/{{session_id}}/` - 特定会话 -- `cortex://resources/` - 知识库 - -🔍 搜索工具: -- search(query, options): 智能搜索记忆 - - return_layers: ["L0"] (默认) | ["L0", "L1"] | ["L0", "L1", "L2"] - - scope: 搜索范围(可选) - - 示例:search(query="Python 装饰器", return_layers=["L0"]) - -- find(query): 快速查找,返回 L0 摘要 - - 自动在记忆空间中搜索 - - 例如:find(query="用户偏好") - -📖 分层访问工具(按需加载): -- abstract(uri): L0 摘要(~100 tokens)- 快速判断相关性 - - 示例:abstract(uri="cortex://user/tars_user/") -- overview(uri): L1 概览(~2000 tokens)- 理解核心信息 - - 示例:overview(uri="cortex://session/abc123/") -- read(uri): L2 完整内容 - 仅在必要时使用 - -📂 文件系统工具: -- ls(uri): 列出目录内容 - - ✅ 示例:ls(uri="cortex://user/tars_user/") - - ❌ 错误:ls(uri="memory://me/SkyronJ/") - -⚠️ **常见错误提醒**: -- 不要使用 `memory://` 前缀,必须用 `cortex://` -- user_id 是分配的用户标识符,不是"me"或用户名 -- 访问用户记忆用 `cortex://user/{{user_id}}/`,不是 `cortex://me/` - -📍 **主动召回原则**(关键): -当用户的问题可能涉及历史信息、用户偏好或之前的对话内容时,你必须**主动**调用记忆工具。 - -**必须主动搜索的场景**: -- 用户问"你记得...吗?"、"告诉我你都记得什么?" → 立即调用 search 或 ls -- 用户提到人名、地点、事件、项目名 → 立即调用 search(query="人名/事件") 查找 -- 用户询问历史对话、之前的讨论 → 立即调用 search 或 find -- 你不确定如何回答 → 先调用 search 确认记忆中是否有相关信息 - -**搜索策略**: -1. 优先使用 search,默认返回 L0 摘要 -2. 根据 L0 判断相关性,需要时调用 overview 获取 L1 -3. 仅在必须时调用 read 获取 L2 完整内容 -4. 渐进式加载可节省 80-90% token - -重要指令: -- 你是一个**主动**使用记忆的 AI 助手,不要等待用户明确说"搜索"才去查找记忆! 
-- 遇到任何可能涉及历史信息的问题,**先搜索,再回答** -- 对话内容会自动保存到 session,你无需关心存储 - -记忆隔离说明: -- 每个 Bot 拥有独立的租户空间(物理隔离) -- 你的记忆不会与其他 Bot 共享 + r#"你是一个拥有持久分层记忆能力的智能 AI 助手(TARS)。 + +会话开始时间:{current_time} +你的 agent_id:{agent_id} +当前用户 user_id:tars_user + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📚 记忆系统概览 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +你拥有一套分层记忆系统,所有信息按 L0/L1/L2 三个粒度存储: + L0(~100 tokens)— 精炼的多主题摘要,回答"这里有哪些话题"(广度优先) + L1(~2000 tokens)— 结构化概览,回答"核心内容是什么" + L2 — 完整原文,回答"具体细节是什么" + +记忆空间的 URI 统一以 `cortex://` 开头,按四个维度组织: + + cortex://session/ — 会话记录(每条对话自动存储,无需手动操作) + cortex://user/tars_user/ — 用户长期记忆 + ├── personal_info/ 个人信息 + ├── preferences/ 偏好习惯 + ├── work_history/ 工作经历 + ├── relationships/ 人际关系 + ├── goals/ 目标愿景 + ├── entities/ 提到过的实体 + └── events/ 重要事件 + cortex://agent/{agent_id}/ — Agent 经验记忆 + └── cases/ 解决过的问题和经验 + cortex://resources/ — 共享知识库 + +注:租户隔离由系统在物理存储层自动处理, + URI 中的 tars_user 和 {agent_id} 分别是 user_id 和 agent_id, + 是 Cortex Memory 中用户/Agent 的逻辑标识,直接使用即可。 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🔧 工具清单与使用规范 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +你有 6 个工具可用。 + +───────────────────────────────── +① search — 语义向量搜索(主力工具) +───────────────────────────────── +何时用:需要在记忆中找到与某个话题相关的内容。 +怎么用: + search(query="用户喜欢的编程语言") + search(query="Python 异步", scope="cortex://user/tars_user") + search(query="之前解决的编译错误", scope="cortex://agent/{agent_id}/cases") + search(query="上周讨论的内容", return_layers=["L0","L1"], limit=5) +参数: + query(必填)— 自然语言描述 + scope(可选)— 限定搜索范围的 URI 前缀 + return_layers(可选,默认["L0"])— 控制返回详细程度 + limit(可选,默认10)— 最大结果数 +返回:每条结果含 uri + score + 你请求的各层内容。 +② find — 快速查找(search 的简化版) +───────────────────────────────── +何时用:只想快速扫一眼有没有相关记忆,不关心分数。 +怎么用:find(query="用户偏好") +区别于 search:固定返回 L0,结果只有 uri + abstract_text。 +适合做"有没有"的快速判断。 + +───────────────────────────────── +③ abstract — 读取指定 URI 的 L0 摘要 +───────────────────────────────── +何时用:已知 URI,花最少 token 了解"这里讲的是什么"。 +怎么用:abstract(uri="cortex://user/tars_user/preferences") +返回:~100 tokens 的广度摘要。 +注意:L0 异步生成,摘要未就绪时返回错误,改用 
overview(uri) 而非 read。 + +───────────────────────────────── +④ overview — 读取指定 URI 的 L1 概览 +───────────────────────────────── +何时用:通过 search 或 abstract 确认相关后,进一步了解核心内容; + 或作为 abstract 失败时的 fallback。 +怎么用: + overview(uri="cortex://user/tars_user/work_history") + overview(uri="cortex://user/tars_user/preferences") + overview(uri="cortex://agent/{agent_id}/cases") +返回:~500-2000 tokens 的结构化概览(Summary / Topics / Key Points / Entities)。 + +───────────────────────────────── +⑤ read — 读取 L2 完整原文 +───────────────────────────────── +何时用:overview 内容仍不够详细时才使用。token 消耗最大,慎用。 +URI 来自 ls 或 search 结果中的具体文件路径,例如: + read(uri="cortex://user/tars_user/preferences/pref_a1b2c3d4.md") + read(uri="cortex://agent/{agent_id}/cases/case_e5f6g7h8.md") + +───────────────────────────────── +⑥ ls — 列出目录内容 +───────────────────────────────── +何时用:浏览记忆空间结构。 +怎么用:ls(uri="cortex://user/tars_user") + ls(uri="cortex://session", include_abstracts=true) + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🧭 工具使用决策树 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + 1. 是否涉及历史信息? — 否 → 直接回答 + 2. 知道信息在哪个目录? — 知道 → abstract/overview 直接读 + 3. 不知道 → search 语义搜索 + 4. L0 够?→ 回答 ‖ 不够 → overview 读 L1 + 5. L1 够?→ 回答 ‖ 不够 → read 读 L2 + +原则:L0 → L1 → L2 逐层深入,不跳级。 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +⚡ 主动搜索触发规则 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +- "你记得...吗?" → search(scope="cortex://user/tars_user") +- 提到人名/项目名 → search(query="...") +- "我之前说过..." → search(scope="cortex://session") +- 问偏好/习惯 → overview(uri="cortex://user/tars_user/preferences") +- 复杂问题 → search(scope="cortex://agent/{agent_id}/cases") +- "你都记得什么?" → ls(uri="cortex://user/tars_user", include_abstracts=true) + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +📌 关于记忆存储 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +- 对话自动保存到 session timeline +- 会话结束后系统自动提取用户/Agent 记忆 +- L0/L1 由系统自动生成,你无需操作 + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +🎯 核心行为准则 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +1. 先搜索,再回答 +2. 自然融合,不说"根据记忆..." +3. 诚实告知缺失 +4. 逐层深入 L0→L1→L2 +5. 
主动召回,不等用户要求 "#, current_time = chrono::Local::now().format("%Y年%m月%d日 %H:%M:%S"), - bot_id = agent_id + agent_id = agent_id ) }; diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index ca75686..462cfa8 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -18,8 +18,7 @@ use std::io; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::mpsc; -// 🔧 移除未使用的导入 -// use cortex_mem_core::automation::{AutoExtractor, AutoExtractConfig}; +// use cortex_mem_core::automation::{AutoExtractor, AutoExtractConfig}; // removed in v2.5 // 音频相关导入 use crate::audio_input; @@ -33,9 +32,8 @@ pub struct App { ui: AppUi, current_bot: Option, rig_agent: Option>, - tenant_operations: Option>, // 租户隔离的 operations - // auto_extractor已移除 - 由Cortex Memory统一管理 - current_session_id: Option, // 当前会话ID + tenant_operations: Option>, + current_session_id: Option, infrastructure: Option>, user_id: String, user_info: Option, @@ -96,9 +94,8 @@ impl App { ui, current_bot: None, rig_agent: None, - tenant_operations: None, // 初始化为 None,在选择 Bot 时创建 - // auto_extractor已移除 - 由Cortex Memory统一管理 - current_session_id: None, // 初始化为 None,在开始对话时创建 + tenant_operations: None, + current_session_id: None, infrastructure, user_id: "tars_user".to_string(), user_info: None, @@ -1068,61 +1065,23 @@ impl App { let _ = self.disable_audio_input().await; } - // 🔧 修复:使用close_session代替直接调用extract_session if let (Some(tenant_ops), Some(session_id)) = (&self.tenant_operations, &self.current_session_id) { - log::info!("🧠 开始关闭会话并提取记忆..."); - - // 关闭会话(会触发timeline层生成和memory extraction) - let session_manager = tenant_ops.session_manager().clone(); - match session_manager - .write() - .await - .close_session(session_id) - .await - { + // 同步关闭会话:等待记忆提取 + user/agent 文件写入 + L0/L1 生成全部完成后才返回 + log::info!("🧠 同步关闭会话,等待记忆提取与层级文件生成完成..."); + match tenant_ops.close_session_sync(session_id).await { Ok(_) => { - log::info!("✅ 会话已关闭,SessionClosed 
事件已发送"); + log::info!("✅ 会话已关闭,记忆提取与 L0/L1 生成完成"); } Err(e) => { log::warn!("⚠️ 会话关闭失败: {}", e); } } - // 🔧 v2.5: 刷新并等待所有后台任务完成 - // 这会: - // 1. 等待事件处理完成(包括记忆提取) - // 2. 刷新 debouncer 中的待处理层级更新 - // 3. 再次等待确保所有更新完成 - // 使用真正的事件通知机制,不使用固定超时 - log::info!("⏳ 刷新并等待所有后台任务完成..."); - let completed = tenant_ops.flush_and_wait(Some(1)).await; - if !completed { - log::warn!("⚠️ flush_and_wait 返回 false,可能有任务未完成"); - } - - // 退出时生成所有缺失的 L0/L1 层级文件 - // ensure_all_layers 已经会扫描所有维度 - log::info!("📑 开始生成所有缺失的 L0/L1 层级文件..."); - match tenant_ops.ensure_all_layers().await { - Ok(stats) => { - log::info!( - "✅ 层级文件生成完成: 总计 {}, 成功 {}, 失败 {}", - stats.total, - stats.generated, - stats.failed - ); - } - Err(e) => { - log::warn!("⚠️ 层级文件生成失败: {}", e); - } - } - - // 退出时索引所有文件到向量数据库 - // 🔧 添加超时保护,避免因 Qdrant 或 Embedding 服务不可用而卡住 + // 索引所有文件到向量数据库(此时 L0/L1 已全部生成,索引数据完整) log::info!("📊 开始索引所有文件到向量数据库..."); - let index_timeout = tokio::time::Duration::from_secs(120); + let index_timeout = tokio::time::Duration::from_secs(360); match tokio::time::timeout(index_timeout, tenant_ops.index_all_files()).await { Ok(Ok(stats)) => { log::info!( From 0b4f98a23789bd71fadf7f260bb5be2a79deadf4 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Tue, 10 Mar 2026 18:47:23 +0800 Subject: [PATCH 09/20] update --- Cargo.lock | 95 ++ cortex-mem-cli/Cargo.toml | 5 + cortex-mem-cli/test_cli.sh | 208 --- .../src/automation/layer_generator_tests.rs | 273 ---- cortex-mem-core/src/automation/mod.rs | 4 - cortex-mem-core/src/filesystem/mod.rs | 3 - cortex-mem-core/src/filesystem/tests.rs | 101 -- cortex-mem-core/src/vector_store/mod.rs | 3 - cortex-mem-core/src/vector_store/tests.rs | 106 -- .../tests/core_functionality_tests.rs | 1281 ----------------- .../tests/llm_integration_tests.rs | 513 ------- 11 files changed, 100 insertions(+), 2492 deletions(-) delete mode 100755 cortex-mem-cli/test_cli.sh delete mode 100644 cortex-mem-core/src/automation/layer_generator_tests.rs delete mode 100644 
cortex-mem-core/src/filesystem/tests.rs delete mode 100644 cortex-mem-core/src/vector_store/tests.rs delete mode 100644 cortex-mem-tools/tests/core_functionality_tests.rs delete mode 100644 cortex-mem-tools/tests/llm_integration_tests.rs diff --git a/Cargo.lock b/Cargo.lock index 1f11962..8a0bd48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -129,6 +129,21 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0f477b951e452a0b6b4a10b53ccd569042d1d01729b519e02074a9c0958a063" +[[package]] +name = "assert_cmd" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c5bcfa8749ac45dd12cb11055aeeb6b27a3895560d60d71e3c23bf979e60514" +dependencies = [ + "anstyle", + "bstr", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -316,6 +331,17 @@ dependencies = [ "objc2", ] +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.19.0" @@ -577,14 +603,17 @@ name = "cortex-mem-cli" version = "2.5.1" dependencies = [ "anyhow", + "assert_cmd", "chrono", "clap", "colored", "cortex-mem-config", "cortex-mem-core", "cortex-mem-tools", + "predicates", "serde", "serde_json", + "tempfile", "tokio", "tracing", "tracing-subscriber", @@ -953,6 +982,12 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.10.7" @@ -1134,6 +1169,15 @@ 
dependencies = [ "miniz_oxide", ] +[[package]] +name = "float-cmp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -2071,6 +2115,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -2488,6 +2538,36 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" +dependencies = [ + "anstyle", + "difflib", + "float-cmp", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" + +[[package]] +name = "predicates-tree" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "pretty_assertions" version = "1.4.1" @@ -3710,6 +3790,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "thiserror" version = "1.0.69" @@ -4301,6 +4387,15 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "wait-timeout" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" diff --git a/cortex-mem-cli/Cargo.toml b/cortex-mem-cli/Cargo.toml index e65d97f..edad562 100644 --- a/cortex-mem-cli/Cargo.toml +++ b/cortex-mem-cli/Cargo.toml @@ -26,3 +26,8 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true } clap = { workspace = true } colored = { workspace = true } + +[dev-dependencies] +assert_cmd = "2.0" +predicates = "3.0" +tempfile = "3.0" diff --git a/cortex-mem-cli/test_cli.sh b/cortex-mem-cli/test_cli.sh deleted file mode 100755 index cc5b020..0000000 --- a/cortex-mem-cli/test_cli.sh +++ /dev/null @@ -1,208 +0,0 @@ -#!/bin/bash -# test_cli.sh - Automated CLI test script for Cortex-Mem - -# Don't use set -e so the script continues on test failures - -# Get the directory where this script is located -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" - -# Configuration - modify these for your environment -CONFIG_PATH="${CONFIG_PATH:-/Users/jiangmeng/Library/Application Support/com.cortex-mem.tars/config.toml}" -TENANT_ID="${TENANT_ID:-bf323233-1f53-4337-a8e7-2ebe9b0080d0}" -CLI="${CLI:-$PROJECT_ROOT/target/release/cortex-mem}" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Test counters -pass=0 -fail=0 -total=0 - -# Test function -test_case() { - local id="$1" - local name="$2" - local cmd="$3" - local expected="$4" - local should_fail="${5:-false}" - - ((total++)) - echo -ne "${BLUE}[$id]${NC} $name... 
" - - # Use eval to properly handle quoted arguments with spaces - if output=$(eval "$cmd" 2>&1); then - if echo "$output" | grep -q "$expected"; then - echo -e "${GREEN}PASS${NC}" - ((pass++)) - else - echo -e "${RED}FAIL${NC}" - echo " Expected to contain: $expected" - echo " Got: ${output:0:200}..." - ((fail++)) - fi - else - if [ "$should_fail" = "true" ]; then - if echo "$output" | grep -q "$expected"; then - echo -e "${GREEN}PASS (expected error)${NC}" - ((pass++)) - else - echo -e "${RED}FAIL${NC}" - echo " Expected error containing: $expected" - echo " Got: $output" - ((fail++)) - fi - else - echo -e "${RED}FAIL (unexpected error)${NC}" - echo " Error: $output" - ((fail++)) - fi - fi -} - -echo "============================================" -echo " Cortex-Mem CLI Automated Test Suite" -echo "============================================" -echo "" -echo "Configuration:" -echo " CLI: $CLI" -echo " Config: $CONFIG_PATH" -echo " Tenant: $TENANT_ID" -echo "" - -# Check if CLI exists -if [ ! -f "$CLI" ]; then - echo -e "${RED}Error: CLI binary not found at $CLI${NC}" - echo "Please build it first: cargo build --release --bin cortex-mem" - exit 1 -fi - -# Check if config exists -if [ ! -f "$CONFIG_PATH" ]; then - echo -e "${RED}Error: Config file not found at $CONFIG_PATH${NC}" - exit 1 -fi - -echo "============================================" -echo " 1. Basic Commands" -echo "============================================" - -test_case "B01" "Help command" \ - "$CLI --help" \ - "Cortex-Mem CLI" - -test_case "B02" "Version command" \ - "$CLI --version" \ - "cortex-mem" - -echo "" -echo "============================================" -echo " 2. Tenant Management" -echo "============================================" - -test_case "T01" "List tenants" \ - "$CLI -c \"$CONFIG_PATH\" tenant list" \ - "Found" - -echo "" -echo "============================================" -echo " 3. 
Session Management" -echo "============================================" - -test_case "S01" "List sessions" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" session list" \ - "sessions" - -echo "" -echo "============================================" -echo " 4. Statistics" -echo "============================================" - -test_case "ST01" "Show statistics" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" stats" \ - "Statistics" - -echo "" -echo "============================================" -echo " 5. Memory Listing" -echo "============================================" - -test_case "L01" "List session root" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" list" \ - "Found" - -test_case "L02" "List user dimension" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" list --uri cortex://user" \ - "Found" - -echo "" -echo "============================================" -echo " 6. Layer Management" -echo "============================================" - -test_case "Y01" "Layer status (English output)" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" layers status" \ - "Layer file status" - -test_case "Y02" "Layer status shows correct command name" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" layers status" \ - "cortex-mem layers ensure-all" - -echo "" -echo "============================================" -echo " 7. Error Handling" -echo "============================================" - -# Note: R07 (negative min_score) is skipped because clap rejects negative numbers -# as options before our validation code runs. This is expected clap behavior. - -test_case "R08" "Invalid min_score (> 1.0)" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" search test -s 2.0" \ - "min_score must be between" \ - "true" - -test_case "G06" "Invalid URI scheme" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" get invalid-uri" \ - "Invalid URI scheme" \ - "true" - -echo "" -echo "============================================" -echo " 8. 
Directory Abstract (Bug #1 Fix)" -echo "============================================" - -test_case "G03" "Get directory abstract" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" get cortex://user/tars_user/entities --abstract-only" \ - "Abstract" - -echo "" -echo "============================================" -echo " 9. Add Message (Bug #4 Fix)" -echo "============================================" - -test_case "M05" "Add message URI format" \ - "$CLI -c \"$CONFIG_PATH\" --tenant \"$TENANT_ID\" add --thread test-auto --role user \"Automated test message\"" \ - "cortex://session/test-auto/timeline" - -echo "" -echo "============================================" -echo " Test Summary" -echo "============================================" -echo "" -echo -e "Total: $total" -echo -e "Passed: ${GREEN}$pass${NC}" -echo -e "Failed: ${RED}$fail${NC}" -echo "" - -if [ $fail -gt 0 ]; then - echo -e "${RED}Some tests failed!${NC}" - exit 1 -else - echo -e "${GREEN}All tests passed!${NC}" - exit 0 -fi diff --git a/cortex-mem-core/src/automation/layer_generator_tests.rs b/cortex-mem-core/src/automation/layer_generator_tests.rs deleted file mode 100644 index 118f728..0000000 --- a/cortex-mem-core/src/automation/layer_generator_tests.rs +++ /dev/null @@ -1,273 +0,0 @@ -use super::*; -use crate::{CortexFilesystem, FilesystemOperations, llm::{LLMClient, LLMConfig, MemoryExtractionResponse}, Result}; -use std::sync::Arc; -use async_trait::async_trait; - -/// Mock LLM Client for testing -struct MockLLMClient { - abstract_response: String, - overview_response: String, -} - -impl MockLLMClient { - fn new() -> Self { - Self { - abstract_response: "Mock abstract summary for testing.".to_string(), - overview_response: "# Mock Overview\n\nThis is a mock overview for testing purposes.\n\n## Topics\n- Testing\n- Mocking".to_string(), - } - } -} - -#[async_trait] -impl LLMClient for MockLLMClient { - async fn complete(&self, _prompt: &str) -> Result { - Ok(self.abstract_response.clone()) - 
} - - async fn complete_with_system(&self, system: &str, _prompt: &str) -> Result { - if system.contains("abstract") || system.contains("摘要") { - Ok(self.abstract_response.clone()) - } else { - Ok(self.overview_response.clone()) - } - } - - async fn extract_memories(&self, _prompt: &str) -> Result { - Ok(MemoryExtractionResponse { - facts: vec![], - decisions: vec![], - entities: vec![], - }) - } - - async fn extract_structured_facts(&self, _prompt: &str) -> Result { - Ok(crate::llm::extractor_types::StructuredFactExtraction { - facts: vec![], - }) - } - - async fn extract_detailed_facts(&self, _prompt: &str) -> Result { - Ok(crate::llm::extractor_types::DetailedFactExtraction { - facts: vec![], - }) - } - - fn model_name(&self) -> &str { - "mock-llm" - } - - fn config(&self) -> &LLMConfig { - // Return a static config - static CONFIG: LLMConfig = LLMConfig { - api_base_url: String::new(), - api_key: String::new(), - model_efficient: String::new(), - temperature: 0.7, - max_tokens: 2048, - }; - &CONFIG - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::TempDir; - - async fn setup_test_filesystem() -> (Arc, TempDir) { - let temp_dir = TempDir::new().unwrap(); - let fs = Arc::new(CortexFilesystem::with_tenant( - temp_dir.path(), - "test-tenant", - )); - fs.initialize().await.unwrap(); - (fs, temp_dir) - } - - fn mock_llm_client() -> Arc { - Arc::new(MockLLMClient::new()) - } - - fn default_config() -> LayerGenerationConfig { - LayerGenerationConfig { - batch_size: 2, - delay_ms: 100, - auto_generate_on_startup: false, - abstract_config: AbstractConfig { - max_tokens: 400, - max_chars: 2000, - target_sentences: 2, - }, - overview_config: OverviewConfig { - max_tokens: 1500, - max_chars: 6000, - }, - } - } - - #[tokio::test] - async fn test_scan_all_directories_empty() { - let (fs, _temp) = setup_test_filesystem().await; - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - - let dirs = 
generator.scan_all_directories().await.unwrap(); - - assert_eq!(dirs.len(), 0, "Empty filesystem should return no directories"); - } - - #[tokio::test] - async fn test_scan_all_directories_with_files() { - let (fs, _temp) = setup_test_filesystem().await; - - // Create test directories with files - fs.write("cortex://user/test-user/preferences/pref1.md", "content").await.unwrap(); - fs.write("cortex://agent/test-agent/cases/case1.md", "content").await.unwrap(); - fs.write("cortex://session/test-session/timeline/2026-02/25/msg1.md", "content").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let dirs = generator.scan_all_directories().await.unwrap(); - - // Should find user/test-user, user/test-user/preferences, agent/test-agent, etc. - assert!(dirs.len() > 0, "Should find directories"); - assert!(dirs.iter().any(|d| d.contains("preferences")), "Should find preferences dir"); - assert!(dirs.iter().any(|d| d.contains("cases")), "Should find cases dir"); - } - - #[tokio::test] - async fn test_scan_nested_directories() { - let (fs, _temp) = setup_test_filesystem().await; - - fs.write("cortex://user/u1/prefs/lang/rust.md", "content").await.unwrap(); - fs.write("cortex://user/u1/prefs/lang/python.md", "content").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let dirs = generator.scan_all_directories().await.unwrap(); - - // Should include all nested levels - assert!(dirs.iter().any(|d| d.contains("user/u1"))); - assert!(dirs.iter().any(|d| d.contains("prefs"))); - assert!(dirs.iter().any(|d| d.contains("lang"))); - } - - #[tokio::test] - async fn test_has_layers_both_present() { - let (fs, _temp) = setup_test_filesystem().await; - - fs.write("cortex://user/test/.abstract.md", "abstract").await.unwrap(); - fs.write("cortex://user/test/.overview.md", "overview").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let 
has_layers = generator.has_layers("cortex://user/test").await.unwrap(); - - assert!(has_layers, "Should have layers when both files exist"); - } - - #[tokio::test] - async fn test_has_layers_missing_abstract() { - let (fs, _temp) = setup_test_filesystem().await; - - fs.write("cortex://user/test/.overview.md", "overview").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let has_layers = generator.has_layers("cortex://user/test").await.unwrap(); - - assert!(!has_layers, "Should not have layers when abstract is missing"); - } - - #[tokio::test] - async fn test_has_layers_missing_overview() { - let (fs, _temp) = setup_test_filesystem().await; - - fs.write("cortex://user/test/.abstract.md", "abstract").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let has_layers = generator.has_layers("cortex://user/test").await.unwrap(); - - assert!(!has_layers, "Should not have layers when overview is missing"); - } - - #[tokio::test] - async fn test_has_layers_both_missing() { - let (fs, _temp) = setup_test_filesystem().await; - - fs.write("cortex://user/test/file.md", "content").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let has_layers = generator.has_layers("cortex://user/test").await.unwrap(); - - assert!(!has_layers, "Should not have layers when both files are missing"); - } - - #[tokio::test] - async fn test_filter_missing_layers() { - let (fs, _temp) = setup_test_filesystem().await; - - // Create one complete directory - fs.write("cortex://user/complete/.abstract.md", "a").await.unwrap(); - fs.write("cortex://user/complete/.overview.md", "o").await.unwrap(); - fs.write("cortex://user/complete/file.md", "content").await.unwrap(); - - // Create two incomplete directories - fs.write("cortex://user/missing1/file.md", "content").await.unwrap(); - fs.write("cortex://user/missing2/file.md", "content").await.unwrap(); - - let 
generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - - let all_dirs = vec![ - "cortex://user/complete".to_string(), - "cortex://user/missing1".to_string(), - "cortex://user/missing2".to_string(), - ]; - - let missing = generator.filter_missing_layers(&all_dirs).await.unwrap(); - - assert_eq!(missing.len(), 2, "Should find 2 missing directories"); - assert!(missing.contains(&"cortex://user/missing1".to_string())); - assert!(missing.contains(&"cortex://user/missing2".to_string())); - assert!(!missing.contains(&"cortex://user/complete".to_string())); - } - - #[tokio::test] - async fn test_ensure_all_layers_empty_filesystem() { - let (fs, _temp) = setup_test_filesystem().await; - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - - let stats = generator.ensure_all_layers().await.unwrap(); - - assert_eq!(stats.total, 0); - assert_eq!(stats.generated, 0); - assert_eq!(stats.failed, 0); - } - - #[tokio::test] - async fn test_ensure_all_layers_with_missing() { - let (fs, _temp) = setup_test_filesystem().await; - - // Create directories with content but no L0/L1 - fs.write("cortex://user/test1/pref.md", "User preference content for testing").await.unwrap(); - fs.write("cortex://user/test2/pref.md", "Another preference for testing").await.unwrap(); - - let generator = LayerGenerator::new(fs, mock_llm_client(), default_config()); - let stats = generator.ensure_all_layers().await.unwrap(); - - // Should attempt to generate for missing directories - assert!(stats.total > 0, "Should find directories needing generation"); - assert!(stats.generated > 0 || stats.failed > 0, "Should attempt generation"); - } - - #[tokio::test] - async fn test_regenerate_oversized_abstracts_no_oversized() { - let (fs, _temp) = setup_test_filesystem().await; - - // Create a normal-sized abstract - let normal_content = "Short abstract.\n\n**Added**: 2026-02-25 12:00:00 UTC"; - fs.write("cortex://user/test/.abstract.md", normal_content).await.unwrap(); 
- fs.write("cortex://user/test/file.md", "content").await.unwrap(); - - let generator = LayerGenerator::new(fs.clone(), mock_llm_client(), default_config()); - let stats = generator.regenerate_oversized_abstracts().await.unwrap(); - - assert_eq!(stats.total, 0, "Should not find any oversized abstracts"); - assert_eq!(stats.regenerated, 0); - } -} diff --git a/cortex-mem-core/src/automation/mod.rs b/cortex-mem-core/src/automation/mod.rs index 68be3f9..817c268 100644 --- a/cortex-mem-core/src/automation/mod.rs +++ b/cortex-mem-core/src/automation/mod.rs @@ -3,10 +3,6 @@ mod layer_generator; mod manager; mod sync; -#[cfg(test)] -#[path = "layer_generator_tests.rs"] -mod layer_generator_tests; - pub use indexer::{AutoIndexer, IndexStats, IndexerConfig}; pub use layer_generator::{ AbstractConfig, GenerationStats, LayerGenerationConfig, LayerGenerator, OverviewConfig, diff --git a/cortex-mem-core/src/filesystem/mod.rs b/cortex-mem-core/src/filesystem/mod.rs index 34e67f7..cd138bc 100644 --- a/cortex-mem-core/src/filesystem/mod.rs +++ b/cortex-mem-core/src/filesystem/mod.rs @@ -1,8 +1,5 @@ pub mod uri; pub mod operations; -#[cfg(test)] -mod tests; - pub use uri::{CortexUri, UriParser}; pub use operations::{CortexFilesystem, FilesystemOperations}; diff --git a/cortex-mem-core/src/filesystem/tests.rs b/cortex-mem-core/src/filesystem/tests.rs deleted file mode 100644 index 9b2abd4..0000000 --- a/cortex-mem-core/src/filesystem/tests.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! 
Filesystem URI tests - -#[cfg(test)] -mod tests { - use super::super::{CortexUri, UriParser}; - use crate::Dimension; - - #[test] - fn test_parse_user_preferences() { - let uri_str = "cortex://user/preferences/language.md"; - let uri = UriParser::parse(uri_str).unwrap(); - - assert!(matches!(uri.dimension, Dimension::User)); - assert_eq!(uri.category, "preferences"); - assert_eq!(uri.resource, Some("language.md".to_string())); - } - - #[test] - fn test_parse_agent_cases() { - let uri_str = "cortex://agent/cases/case_001.md"; - let uri = UriParser::parse(uri_str).unwrap(); - - assert!(matches!(uri.dimension, Dimension::Agent)); - assert_eq!(uri.category, "cases"); - assert_eq!(uri.resource, Some("case_001.md".to_string())); - } - - #[test] - fn test_parse_session() { - let uri_str = "cortex://session/abc123/timeline"; - let uri = UriParser::parse(uri_str).unwrap(); - - assert!(matches!(uri.dimension, Dimension::Session)); - assert_eq!(uri.category, "abc123"); - assert_eq!(uri.subcategory, Some("timeline".to_string())); - } - - #[test] - fn test_parse_resources() { - let uri_str = "cortex://resources/my_project/docs/readme.md"; - let uri = UriParser::parse(uri_str).unwrap(); - - assert!(matches!(uri.dimension, Dimension::Resources)); - assert_eq!(uri.category, "my_project"); - assert_eq!(uri.subcategory, Some("docs".to_string())); - assert_eq!(uri.resource, Some("readme.md".to_string())); - } - - #[test] - fn test_to_uri_string() { - let uri = CortexUri::new(Dimension::User); - assert_eq!(uri.to_uri_string(), "cortex://user"); - } - - #[test] - fn test_directory_uri() { - let uri_str = "cortex://user/preferences/language.md"; - let uri = UriParser::parse(uri_str).unwrap(); - assert_eq!(uri.directory_uri(), "cortex://user/preferences"); - } - - #[test] - fn test_invalid_scheme() { - let result = UriParser::parse("http://example.com"); - assert!(result.is_err()); - } - - #[test] - fn test_invalid_dimension() { - let result = UriParser::parse("cortex://invalid/path"); - 
assert!(result.is_err()); - } - - #[test] - fn test_helper_methods() { - assert_eq!( - CortexUri::user_preferences("language"), - "cortex://user/preferences/language.md" - ); - assert_eq!( - CortexUri::user_entities("project_alpha"), - "cortex://user/entities/project_alpha.md" - ); - assert_eq!( - CortexUri::user_events("decision_001"), - "cortex://user/events/decision_001.md" - ); - assert_eq!( - CortexUri::agent_cases("bug_fix_001"), - "cortex://agent/cases/bug_fix_001.md" - ); - assert_eq!( - CortexUri::agent_skills("search_code"), - "cortex://agent/skills/search_code.md" - ); - assert_eq!( - CortexUri::session("abc123"), - "cortex://session/abc123" - ); - } -} diff --git a/cortex-mem-core/src/vector_store/mod.rs b/cortex-mem-core/src/vector_store/mod.rs index d17d06a..cb63815 100644 --- a/cortex-mem-core/src/vector_store/mod.rs +++ b/cortex-mem-core/src/vector_store/mod.rs @@ -1,8 +1,5 @@ pub mod qdrant; -#[cfg(test)] -mod tests; - use crate::{ error::Result, types::{ContextLayer, Filters, Memory, ScoredMemory}, diff --git a/cortex-mem-core/src/vector_store/tests.rs b/cortex-mem-core/src/vector_store/tests.rs deleted file mode 100644 index 2ef7fa4..0000000 --- a/cortex-mem-core/src/vector_store/tests.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! 
Vector store tests - -#[cfg(test)] -mod tests { - use super::super::{uri_to_vector_id, parse_vector_id}; - use crate::types::ContextLayer; - - /// Check if a string is a valid UUID format (8-4-4-4-12 hex digits) - fn is_valid_uuid_format(s: &str) -> bool { - let parts: Vec<&str> = s.split('-').collect(); - if parts.len() != 5 { - return false; - } - parts[0].len() == 8 && parts[1].len() == 4 && - parts[2].len() == 4 && parts[3].len() == 4 && parts[4].len() == 12 && - s.chars().filter(|&c| c != '-').all(|c| c.is_ascii_hexdigit()) - } - - #[test] - fn test_uri_to_vector_id_l0() { - let uri = "cortex://session/abc/timeline.md"; - let id = uri_to_vector_id(uri, ContextLayer::L0Abstract); - assert!(is_valid_uuid_format(&id), "ID should be valid UUID format, got: {}", id); - // Verify deterministic - same input produces same ID - let id2 = uri_to_vector_id(uri, ContextLayer::L0Abstract); - assert_eq!(id, id2, "ID should be deterministic"); - } - - #[test] - fn test_uri_to_vector_id_l1() { - let uri = "cortex://user/preferences/language.md"; - let id = uri_to_vector_id(uri, ContextLayer::L1Overview); - assert!(is_valid_uuid_format(&id), "ID should be valid UUID format, got: {}", id); - // Verify deterministic - let id2 = uri_to_vector_id(uri, ContextLayer::L1Overview); - assert_eq!(id, id2, "ID should be deterministic"); - } - - #[test] - fn test_uri_to_vector_id_l2() { - let uri = "cortex://agent/cases/case_001.md"; - let id = uri_to_vector_id(uri, ContextLayer::L2Detail); - assert!(is_valid_uuid_format(&id), "ID should be valid UUID format, got: {}", id); - // Verify deterministic - let id2 = uri_to_vector_id(uri, ContextLayer::L2Detail); - assert_eq!(id, id2, "ID should be deterministic"); - } - - #[test] - fn test_different_layers_produce_different_ids() { - let uri = "cortex://session/test/message.md"; - let id_l0 = uri_to_vector_id(uri, ContextLayer::L0Abstract); - let id_l1 = uri_to_vector_id(uri, ContextLayer::L1Overview); - let id_l2 = uri_to_vector_id(uri, 
ContextLayer::L2Detail); - - // Different layers should produce different IDs - assert_ne!(id_l0, id_l1, "L0 and L1 should have different IDs"); - assert_ne!(id_l1, id_l2, "L1 and L2 should have different IDs"); - assert_ne!(id_l0, id_l2, "L0 and L2 should have different IDs"); - } - - #[test] - fn test_different_uris_produce_different_ids() { - let uri1 = "cortex://session/test/message1.md"; - let uri2 = "cortex://session/test/message2.md"; - - let id1 = uri_to_vector_id(uri1, ContextLayer::L2Detail); - let id2 = uri_to_vector_id(uri2, ContextLayer::L2Detail); - - assert_ne!(id1, id2, "Different URIs should produce different IDs"); - } - - #[test] - fn test_parse_vector_id_with_layer_suffix() { - // Test parsing legacy IDs that contain layer suffix (for backward compatibility) - let id = "cortex://session/abc/timeline.md#/L0"; - let (uri, layer) = parse_vector_id(id); - assert_eq!(uri, "cortex://session/abc/timeline.md"); - assert!(matches!(layer, ContextLayer::L0Abstract)); - } - - #[test] - fn test_parse_vector_id_l1() { - let id = "cortex://user/preferences/language.md#/L1"; - let (uri, layer) = parse_vector_id(id); - assert_eq!(uri, "cortex://user/preferences/language.md"); - assert!(matches!(layer, ContextLayer::L1Overview)); - } - - #[test] - fn test_parse_vector_id_l2() { - let id = "cortex://agent/cases/case_001.md"; - let (uri, layer) = parse_vector_id(id); - assert_eq!(uri, "cortex://agent/cases/case_001.md"); - assert!(matches!(layer, ContextLayer::L2Detail)); - } - - #[test] - fn test_parse_vector_id_uuid_format() { - // parse_vector_id should return UUID as-is (cannot reverse hash) - let uuid_id = "547e892d-584e-279b-b987-414299aa3dcf"; - let (result, layer) = parse_vector_id(uuid_id); - assert_eq!(result, uuid_id); - assert!(matches!(layer, ContextLayer::L2Detail)); - } -} diff --git a/cortex-mem-tools/tests/core_functionality_tests.rs b/cortex-mem-tools/tests/core_functionality_tests.rs deleted file mode 100644 index 0adb73e..0000000 --- 
a/cortex-mem-tools/tests/core_functionality_tests.rs +++ /dev/null @@ -1,1281 +0,0 @@ -//! Cortex-Mem 核心功能测试 -//! -//! 测试分类: -//! 1. 单元测试 (unit_*) - 不依赖外部服务,使用 Mock -//! 2. 集成测试 (integration_*) - 需要外部服务 (Qdrant, LLM, Embedding) -//! -//! 运行方式: -//! - 单元测试: `cargo test` -//! - 集成测试: `cargo test -- --ignored` (需要配置外部服务) - -#![cfg(test)] - -use std::collections::HashMap; -use std::sync::Arc; -use tempfile::TempDir; -use tokio::sync::RwLock; - -// ==================== Mock 实现 ==================== - -mod mock { - use async_trait::async_trait; - use cortex_mem_core::llm::LLMClient; - use cortex_mem_core::llm::{LLMConfig, MemoryExtractionResponse}; - use cortex_mem_core::llm::extractor_types::{StructuredFactExtraction, DetailedFactExtraction}; - use cortex_mem_core::Result; - - /// Mock LLM Client - 返回预定义的响应 - pub struct MockLLMClient { - config: LLMConfig, - } - - impl MockLLMClient { - pub fn new() -> Self { - Self { - config: LLMConfig::default(), - } - } - } - - impl Default for MockLLMClient { - fn default() -> Self { - Self::new() - } - } - - #[async_trait] - impl LLMClient for MockLLMClient { - async fn complete(&self, _prompt: &str) -> Result { - Ok("Mock LLM response".to_string()) - } - - async fn complete_with_system(&self, _system: &str, _prompt: &str) -> Result { - Ok("Mock LLM response with system prompt".to_string()) - } - - async fn extract_memories(&self, _prompt: &str) -> Result { - Ok(MemoryExtractionResponse { - facts: vec![], - decisions: vec![], - entities: vec![], - }) - } - - async fn extract_structured_facts(&self, _prompt: &str) -> Result { - Ok(StructuredFactExtraction { facts: vec![] }) - } - - async fn extract_detailed_facts(&self, _prompt: &str) -> Result { - Ok(DetailedFactExtraction { facts: vec![] }) - } - - fn model_name(&self) -> &str { - "mock-model" - } - - fn config(&self) -> &LLMConfig { - &self.config - } - } -} - -// ==================== 测试辅助函数 ==================== - -mod test_utils { - use super::*; - use cortex_mem_core::{ - 
CortexFilesystem, FilesystemOperations, SessionConfig, SessionManager, - layers::manager::LayerManager, - }; - - /// 测试用的上下文封装 - /// - /// 由于 MemoryOperations 的字段是 pub(crate),测试无法直接构造。 - /// 这个结构体封装了测试所需的核心功能。 - #[allow(dead_code)] - pub struct TestContext { - pub filesystem: Arc, - pub session_manager: Arc>, - pub layer_manager: Arc, - pub temp_dir: TempDir, - } - - impl TestContext { - /// 创建新的测试上下文 - pub async fn new() -> Self { - let temp_dir = TempDir::new().unwrap(); - let data_dir = temp_dir.path().to_str().unwrap(); - - let filesystem = Arc::new(CortexFilesystem::new(data_dir)); - filesystem.initialize().await.unwrap(); - - let config = SessionConfig::default(); - let session_manager = SessionManager::new(filesystem.clone(), config); - let session_manager = Arc::new(RwLock::new(session_manager)); - - let llm_client = Arc::new(mock::MockLLMClient::new()); - let layer_manager = Arc::new(LayerManager::new(filesystem.clone(), llm_client)); - - Self { - filesystem, - session_manager, - layer_manager, - temp_dir, - } - } - - /// 创建带有租户隔离的测试上下文 - pub async fn with_tenant(tenant_id: &str) -> Self { - let temp_dir = TempDir::new().unwrap(); - let data_dir = temp_dir.path().to_str().unwrap(); - - let filesystem = Arc::new(CortexFilesystem::with_tenant(data_dir, tenant_id)); - filesystem.initialize().await.unwrap(); - - let config = SessionConfig::default(); - let session_manager = SessionManager::new(filesystem.clone(), config); - let session_manager = Arc::new(RwLock::new(session_manager)); - - let llm_client = Arc::new(mock::MockLLMClient::new()); - let layer_manager = Arc::new(LayerManager::new(filesystem.clone(), llm_client)); - - Self { - filesystem, - session_manager, - layer_manager, - temp_dir, - } - } - - /// 添加消息到会话 - pub async fn add_message(&self, thread_id: &str, role: &str, content: &str) -> String { - let thread_id = if thread_id.is_empty() { "default" } else { thread_id }; - - { - let sm = self.session_manager.read().await; - if 
sm.session_exists(thread_id).await.unwrap() { - // Session exists, proceed to add message - } else { - drop(sm); - let sm = self.session_manager.write().await; - sm.create_session_with_ids(thread_id, None, None).await.unwrap(); - } - } - - let sm = self.session_manager.read().await; - let message = cortex_mem_core::Message::new( - match role { - "user" => cortex_mem_core::MessageRole::User, - "assistant" => cortex_mem_core::MessageRole::Assistant, - "system" => cortex_mem_core::MessageRole::System, - _ => cortex_mem_core::MessageRole::User, - }, - content, - ); - - sm.message_storage().save_message(thread_id, &message).await.unwrap() - } - - /// 列出会话 - pub async fn list_sessions(&self) -> Vec { - let entries = self.filesystem.list("cortex://session").await.unwrap(); - let mut session_infos = Vec::new(); - - for entry in entries { - if entry.is_directory { - let thread_id = entry.name.clone(); - if let Ok(metadata) = self.session_manager.read().await.load_session(&thread_id).await { - let status_str = match metadata.status { - cortex_mem_core::session::manager::SessionStatus::Active => "active", - cortex_mem_core::session::manager::SessionStatus::Closed => "closed", - cortex_mem_core::session::manager::SessionStatus::Archived => "archived", - }; - - session_infos.push(SessionInfo { - thread_id: metadata.thread_id, - status: status_str.to_string(), - message_count: 0, - created_at: metadata.created_at, - updated_at: metadata.updated_at, - }); - } - } - } - - session_infos - } - - /// 获取会话信息 - pub async fn get_session(&self, thread_id: &str) -> Result { - let sm = self.session_manager.read().await; - let metadata = sm.load_session(thread_id).await.map_err(|e| e.to_string())?; - - let status_str = match metadata.status { - cortex_mem_core::session::manager::SessionStatus::Active => "active", - cortex_mem_core::session::manager::SessionStatus::Closed => "closed", - cortex_mem_core::session::manager::SessionStatus::Archived => "archived", - }; - - Ok(SessionInfo { - 
thread_id: metadata.thread_id, - status: status_str.to_string(), - message_count: 0, - created_at: metadata.created_at, - updated_at: metadata.updated_at, - }) - } - - /// 关闭会话 - pub async fn close_session(&self, thread_id: &str) -> Result<(), String> { - let mut sm = self.session_manager.write().await; - sm.close_session(thread_id).await.map_err(|e| e.to_string())?; - Ok(()) - } - - /// 存储内容 - pub async fn store(&self, args: StoreArgs) -> StoreResponse { - let scope = match args.scope.as_str() { - "user" | "session" | "agent" => args.scope.as_str(), - _ => "session", - }; - - let uri = match scope { - "user" => { - let user_id = args.user_id.as_deref().unwrap_or("default"); - let now = chrono::Utc::now(); - let year_month = now.format("%Y-%m").to_string(); - let day = now.format("%d").to_string(); - let filename = format!( - "{}_{}.md", - now.format("%H_%M_%S"), - uuid::Uuid::new_v4().to_string().split('-').next().unwrap_or("unknown") - ); - format!("cortex://user/{}/memories/{}/{}/{}", user_id, year_month, day, filename) - }, - "agent" => { - let agent_id = args.agent_id.as_deref() - .or_else(|| if args.thread_id.is_empty() { None } else { Some(&args.thread_id) }) - .unwrap_or("default"); - let now = chrono::Utc::now(); - let year_month = now.format("%Y-%m").to_string(); - let day = now.format("%d").to_string(); - let filename = format!( - "{}_{}.md", - now.format("%H_%M_%S"), - uuid::Uuid::new_v4().to_string().split('-').next().unwrap_or("unknown") - ); - format!("cortex://agent/{}/memories/{}/{}/{}", agent_id, year_month, day, filename) - }, - "session" => { - self.add_message( - if args.thread_id.is_empty() { "default" } else { &args.thread_id }, - "user", - &args.content - ).await - }, - _ => unreachable!(), - }; - - if scope == "user" || scope == "agent" { - self.filesystem.write(&uri, &args.content).await.unwrap(); - } - - if args.auto_generate_layers.unwrap_or(true) { - let _ = self.layer_manager.generate_all_layers(&uri, &args.content).await; - } - - 
StoreResponse { - uri, - layers_generated: HashMap::new(), - success: true, - } - } - - /// 获取 L0 abstract - pub async fn get_abstract(&self, uri: &str) -> Result { - let text: String = self.layer_manager - .load(uri, cortex_mem_core::ContextLayer::L0Abstract) - .await - .map_err(|e: cortex_mem_core::Error| e.to_string())?; - Ok(AbstractResponse { - uri: uri.to_string(), - abstract_text: text.clone(), - layer: "L0".to_string(), - token_count: text.split_whitespace().count(), - }) - } - - /// 获取 L1 overview - pub async fn get_overview(&self, uri: &str) -> Result { - let text: String = self.layer_manager - .load(uri, cortex_mem_core::ContextLayer::L1Overview) - .await - .map_err(|e: cortex_mem_core::Error| e.to_string())?; - Ok(OverviewResponse { - uri: uri.to_string(), - overview_text: text.clone(), - layer: "L1".to_string(), - token_count: text.split_whitespace().count(), - }) - } - - /// 获取 L2 完整内容 - pub async fn get_read(&self, uri: &str) -> Result { - let content = self.filesystem.read(uri).await.map_err(|e| e.to_string())?; - Ok(ReadResponse { - uri: uri.to_string(), - content: content.clone(), - layer: "L2".to_string(), - token_count: content.split_whitespace().count(), - metadata: None, - }) - } - - /// 列出目录 - pub async fn list(&self, uri: &str) -> Vec { - self.filesystem.list(uri).await - .map(|entries| entries.into_iter().map(|e| e.uri).collect()) - .unwrap_or_default() - } - - /// 读取文件 - pub async fn read(&self, uri: &str) -> Result { - self.filesystem.read(uri).await.map_err(|e| e.to_string()) - } - - /// 删除文件 - pub async fn delete(&self, uri: &str) -> Result<(), String> { - self.filesystem.delete(uri).await.map_err(|e| e.to_string()) - } - - /// 检查文件是否存在 - pub async fn exists(&self, uri: &str) -> bool { - self.filesystem.exists(uri).await.unwrap_or(false) - } - - /// 写入文件 - pub async fn write(&self, uri: &str, content: &str) -> Result<(), String> { - self.filesystem.write(uri, content).await.map_err(|e| e.to_string()) - } - } - - // 类型定义 - 
#[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct SessionInfo { - pub thread_id: String, - pub status: String, - pub message_count: usize, - pub created_at: chrono::DateTime, - pub updated_at: chrono::DateTime, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct StoreArgs { - pub content: String, - pub thread_id: String, - pub metadata: Option, - pub auto_generate_layers: Option, - pub scope: String, - pub user_id: Option, - pub agent_id: Option, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct StoreResponse { - pub uri: String, - pub layers_generated: HashMap, - pub success: bool, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct AbstractResponse { - pub uri: String, - pub abstract_text: String, - pub layer: String, - pub token_count: usize, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct OverviewResponse { - pub uri: String, - pub overview_text: String, - pub layer: String, - pub token_count: usize, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct ReadResponse { - pub uri: String, - pub content: String, - pub layer: String, - pub token_count: usize, - pub metadata: Option, - } - - #[allow(dead_code)] - #[derive(Debug, Clone)] - pub struct FileMetadata { - pub created_at: chrono::DateTime, - pub updated_at: chrono::DateTime, - } -} - -// ==================== 单元测试: 文件系统基础操作 ==================== - -mod unit_filesystem_tests { - use super::*; - - /// 测试基本的文件写入和读取 - #[tokio::test] - async fn test_basic_write_and_read() { - let ctx = test_utils::TestContext::new().await; - - let content = "Hello, Cortex Memory!"; - let uri = "cortex://resources/test.md"; - ctx.write(uri, content).await.unwrap(); - - let read_content = ctx.read(uri).await.unwrap(); - assert_eq!(read_content, content); - } - - /// 测试文件存在性检查 - #[tokio::test] - async fn test_file_exists() { - let ctx = test_utils::TestContext::new().await; - - assert!(!ctx.exists("cortex://resources/nonexistent.md").await); - - 
ctx.write("cortex://resources/test.md", "content").await.unwrap(); - assert!(ctx.exists("cortex://resources/test.md").await); - } - - /// 测试文件删除 - #[tokio::test] - async fn test_file_delete() { - let ctx = test_utils::TestContext::new().await; - - let uri = "cortex://resources/to_delete.md"; - - ctx.write(uri, "content").await.unwrap(); - assert!(ctx.exists(uri).await); - - ctx.delete(uri).await.unwrap(); - assert!(!ctx.exists(uri).await); - - let result = ctx.delete(uri).await; - assert!(result.is_err()); - } - - /// 测试目录列表 - #[tokio::test] - async fn test_list_directory() { - let ctx = test_utils::TestContext::new().await; - - ctx.write("cortex://resources/file1.md", "content1").await.unwrap(); - ctx.write("cortex://resources/file2.md", "content2").await.unwrap(); - ctx.write("cortex://resources/subdir/file3.md", "content3").await.unwrap(); - - let entries = ctx.list("cortex://resources").await; - - assert!(entries.len() >= 2); - - let names: Vec<&str> = entries.iter().map(|e| e.rsplit('/').next().unwrap()).collect(); - assert!(names.contains(&"file1.md")); - assert!(names.contains(&"file2.md")); - } - - /// 测试嵌套目录创建 - #[tokio::test] - async fn test_nested_directory_creation() { - let ctx = test_utils::TestContext::new().await; - - let uri = "cortex://resources/level1/level2/level3/deep.md"; - ctx.write(uri, "deep content").await.unwrap(); - - let content = ctx.read(uri).await.unwrap(); - assert_eq!(content, "deep content"); - } - - /// 测试空内容存储 - #[tokio::test] - async fn test_empty_content() { - let ctx = test_utils::TestContext::new().await; - - let uri = "cortex://resources/empty.md"; - ctx.write(uri, "").await.unwrap(); - - let content = ctx.read(uri).await.unwrap(); - assert!(content.is_empty()); - } - - /// 测试读取不存在的文件 - #[tokio::test] - async fn test_read_nonexistent_file() { - let ctx = test_utils::TestContext::new().await; - - let result = ctx.read("cortex://resources/nonexistent.md").await; - assert!(result.is_err()); - } -} - -// ==================== 
单元测试: 会话管理 ==================== - -mod unit_session_tests { - use super::*; - - /// 测试添加消息到会话 - #[tokio::test] - async fn test_add_message() { - let ctx = test_utils::TestContext::new().await; - - let thread_id = "test_thread"; - let msg_id = ctx.add_message(thread_id, "user", "Hello, world!").await; - - assert!(!msg_id.is_empty()); - - let sessions = ctx.list_sessions().await; - assert_eq!(sessions.len(), 1); - assert_eq!(sessions[0].thread_id, thread_id); - } - - /// 测试空 thread_id 使用默认值 - #[tokio::test] - async fn test_empty_thread_id_defaults() { - let ctx = test_utils::TestContext::new().await; - - let msg_id = ctx.add_message("", "user", "test message").await; - assert!(!msg_id.is_empty()); - - let session = ctx.get_session("default").await.unwrap(); - assert_eq!(session.thread_id, "default"); - assert_eq!(session.status, "active"); - } - - /// 测试多角色消息 - #[tokio::test] - async fn test_multiple_roles() { - let ctx = test_utils::TestContext::new().await; - - let thread_id = "multi_role_thread"; - - ctx.add_message(thread_id, "user", "User message").await; - ctx.add_message(thread_id, "assistant", "Assistant response").await; - ctx.add_message(thread_id, "system", "System instruction").await; - - let session = ctx.get_session(thread_id).await.unwrap(); - assert_eq!(session.thread_id, thread_id); - } - - /// 测试会话关闭 - #[tokio::test] - async fn test_session_close() { - let ctx = test_utils::TestContext::new().await; - - let thread_id = "session_to_close"; - ctx.add_message(thread_id, "user", "message").await; - - ctx.close_session(thread_id).await.unwrap(); - - let session = ctx.get_session(thread_id).await.unwrap(); - assert_eq!(session.status, "closed"); - } - - /// 测试多个会话 - #[tokio::test] - async fn test_multiple_sessions() { - let ctx = test_utils::TestContext::new().await; - - ctx.add_message("thread1", "user", "message 1").await; - ctx.add_message("thread2", "user", "message 2").await; - ctx.add_message("thread3", "user", "message 3").await; - - let sessions = 
ctx.list_sessions().await; - assert_eq!(sessions.len(), 3); - - for session in &sessions { - assert_eq!(session.status, "active"); - } - } - - /// 测试获取不存在的会话 - #[tokio::test] - async fn test_get_nonexistent_session() { - let ctx = test_utils::TestContext::new().await; - - let result = ctx.get_session("nonexistent_session").await; - assert!(result.is_err()); - } -} - -// ==================== 单元测试: 存储操作 ==================== - -mod unit_storage_tests { - use super::*; - - /// 测试 session scope 存储 - #[tokio::test] - async fn test_store_session_scope() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "Session content".to_string(), - thread_id: "test_session".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "session".to_string(), - user_id: None, - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success); - assert!(result.uri.starts_with("cortex://session/test_session/timeline")); - assert!(result.uri.ends_with(".md")); - - let content = ctx.read(&result.uri).await.unwrap(); - assert!(content.contains("Session content")); - } - - /// 测试 user scope 存储 - #[tokio::test] - async fn test_store_user_scope() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "User preference content".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("user_123".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success); - assert!(result.uri.starts_with("cortex://user/user_123/memories")); - assert!(result.uri.ends_with(".md")); - } - - /// 测试 agent scope 存储 - #[tokio::test] - async fn test_store_agent_scope() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "Agent case content".to_string(), - thread_id: "".to_string(), - metadata: None, - 
auto_generate_layers: Some(true), - scope: "agent".to_string(), - user_id: None, - agent_id: Some("agent_456".to_string()), - }; - - let result = ctx.store(args).await; - assert!(result.success); - assert!(result.uri.starts_with("cortex://agent/agent_456/memories")); - assert!(result.uri.ends_with(".md")); - } - - /// 测试自动生成层 - #[tokio::test] - async fn test_auto_generate_layers() { - let ctx = test_utils::TestContext::new().await; - - let content = r#"# Test Document - -This is a test document with some content. - -## Section 1 -Content for section 1. -"#; - - let args = test_utils::StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("layer_test_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success); - - // 验证 L2 可读取 - let l2 = ctx.get_read(&result.uri).await.unwrap(); - assert!(l2.content.contains("Test Document")); - assert_eq!(l2.layer, "L2"); - - // 验证 L0 摘要可获取 - let l0 = ctx.get_abstract(&result.uri).await.unwrap(); - assert!(!l0.abstract_text.is_empty()); - assert_eq!(l0.layer, "L0"); - - // 验证 L1 概览可获取 - let l1 = ctx.get_overview(&result.uri).await.unwrap(); - assert!(!l1.overview_text.is_empty()); - assert_eq!(l1.layer, "L1"); - } - - /// 测试存储带元数据 - #[tokio::test] - async fn test_store_with_metadata() { - let ctx = test_utils::TestContext::new().await; - - let metadata = serde_json::json!({ - "importance": "high", - "tags": ["rust", "testing"], - }); - - let args = test_utils::StoreArgs { - content: "Content with metadata".to_string(), - thread_id: "".to_string(), - metadata: Some(metadata), - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("metadata_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success); - } -} - -// ==================== 单元测试: 多租户隔离 ==================== - -mod 
unit_tenant_isolation_tests { - use super::*; - - /// 测试租户数据隔离 - #[tokio::test] - async fn test_tenant_data_isolation() { - let ctx_a = test_utils::TestContext::with_tenant("tenant_a").await; - let ctx_b = test_utils::TestContext::with_tenant("tenant_b").await; - - // 租户 A 存储数据 - let args_a = test_utils::StoreArgs { - content: "Tenant A private data".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("shared_user".to_string()), - agent_id: None, - }; - let result_a = ctx_a.store(args_a).await; - - // 租户 B 存储数据 - let args_b = test_utils::StoreArgs { - content: "Tenant B private data".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("shared_user".to_string()), - agent_id: None, - }; - let result_b = ctx_b.store(args_b).await; - - // 验证 URI 不同 - assert_ne!(result_a.uri, result_b.uri); - - // 验证租户 A 能读取自己的数据 - let content_a = ctx_a.read(&result_a.uri).await.unwrap(); - assert!(content_a.contains("Tenant A")); - - // 验证租户 B 能读取自己的数据 - let content_b = ctx_b.read(&result_b.uri).await.unwrap(); - assert!(content_b.contains("Tenant B")); - - // 验证租户 A 不能读取租户 B 的数据 - let read_result = ctx_a.read(&result_b.uri).await; - assert!(read_result.is_err()); - } - - /// 测试会话隔离 - #[tokio::test] - async fn test_session_isolation() { - let ctx_a = test_utils::TestContext::with_tenant("tenant_a").await; - let ctx_b = test_utils::TestContext::with_tenant("tenant_b").await; - - ctx_a.add_message("shared_thread_id", "user", "Tenant A message").await; - ctx_b.add_message("shared_thread_id", "user", "Tenant B message").await; - - let sessions_a = ctx_a.list_sessions().await; - let sessions_b = ctx_b.list_sessions().await; - - assert_eq!(sessions_a.len(), 1); - assert_eq!(sessions_b.len(), 1); - } -} - -// ==================== 单元测试: Scope 隔离 ==================== - -mod unit_scope_isolation_tests { - use super::*; 
- - /// 测试不同 scope 的存储路径 - #[tokio::test] - async fn test_scope_path_isolation() { - let ctx = test_utils::TestContext::new().await; - - // Session scope - let session_args = test_utils::StoreArgs { - content: "Session data".to_string(), - thread_id: "my_thread".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "session".to_string(), - user_id: None, - agent_id: None, - }; - let session_result = ctx.store(session_args).await; - assert!(session_result.uri.starts_with("cortex://session/my_thread")); - - // User scope - let user_args = test_utils::StoreArgs { - content: "User data".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("user_001".to_string()), - agent_id: None, - }; - let user_result = ctx.store(user_args).await; - assert!(user_result.uri.starts_with("cortex://user/user_001")); - - // Agent scope - let agent_args = test_utils::StoreArgs { - content: "Agent data".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "agent".to_string(), - user_id: None, - agent_id: Some("agent_001".to_string()), - }; - let agent_result = ctx.store(agent_args).await; - assert!(agent_result.uri.starts_with("cortex://agent/agent_001")); - - // 验证所有 URI 都不同 - assert_ne!(session_result.uri, user_result.uri); - assert_ne!(user_result.uri, agent_result.uri); - assert_ne!(session_result.uri, agent_result.uri); - } - - /// 测试不同 user_id 之间的隔离 - #[tokio::test] - async fn test_user_id_isolation() { - let ctx = test_utils::TestContext::new().await; - - let args_a = test_utils::StoreArgs { - content: "User A data".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("user_a".to_string()), - agent_id: None, - }; - let result_a = ctx.store(args_a).await; - - let args_b = test_utils::StoreArgs { - content: "User B data".to_string(), - 
thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("user_b".to_string()), - agent_id: None, - }; - let result_b = ctx.store(args_b).await; - - assert_ne!(result_a.uri, result_b.uri); - assert!(result_a.uri.contains("user_a")); - assert!(result_b.uri.contains("user_b")); - } -} - -// ==================== 单元测试: 边界情况 ==================== - -mod unit_edge_case_tests { - use super::*; - - /// 测试特殊字符内容 - #[tokio::test] - async fn test_special_characters() { - let ctx = test_utils::TestContext::new().await; - - let special_contents = vec![ - ("中文内容", "Chinese characters"), - ("Emoji 🎉🚀💡", "Emojis"), - ("Tabs\tand\tspaces", "Tabs"), - ("Newlines\nLine1\nLine2", "Newlines"), - ("Quotes: \"double\" 'single'", "Quotes"), - ("HTML & entities", "HTML"), - ("Code: `fn main() {}`", "Code"), - ]; - - for (content, desc) in special_contents { - let args = test_utils::StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("special_char_test".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success, "Failed for: {}", desc); - - let read_content = ctx.read(&result.uri).await.unwrap(); - assert!(read_content.contains(content), "Content mismatch for: {}", desc); - } - } - - /// 测试大内容存储 - #[tokio::test] - async fn test_large_content() { - let ctx = test_utils::TestContext::new().await; - - let large_content = "X".repeat(50 * 1024); - - let args = test_utils::StoreArgs { - content: large_content.clone(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("large_content_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.success); - - let read_content = ctx.read(&result.uri).await.unwrap(); - assert!(read_content.len() >= 
large_content.len() - 10); - } - - /// 测试特殊 thread_id - #[tokio::test] - async fn test_special_thread_ids() { - let ctx = test_utils::TestContext::new().await; - - let special_ids = vec![ - "thread-with-dash", - "thread_with_underscore", - "thread.with.dot", - "thread123", - "123thread", - ]; - - for thread_id in special_ids { - let result = ctx.add_message(thread_id, "user", "test message").await; - assert!(!result.is_empty(), "Failed for thread_id: {}", thread_id); - } - } - - /// 测试无效的 scope - #[tokio::test] - async fn test_invalid_scope() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "test".to_string(), - thread_id: "test_thread".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "invalid_scope".to_string(), - user_id: None, - agent_id: None, - }; - - let result = ctx.store(args).await; - assert!(result.uri.starts_with("cortex://session")); - } -} - -// ==================== 单元测试: 并发操作 ==================== - -mod unit_concurrent_tests { - use super::*; - - /// 测试并发写入 - #[tokio::test] - async fn test_concurrent_writes() { - let ctx = Arc::new(test_utils::TestContext::new().await); - - let mut handles = vec![]; - - for i in 0..20 { - let ctx_clone = ctx.clone(); - let handle = tokio::spawn(async move { - ctx_clone.add_message("concurrent_test", "user", &format!("Message {}", i)).await - }); - handles.push(handle); - } - - let results: Vec<_> = futures::future::join_all(handles).await; - let success_count = results.iter().filter(|r| !r.as_ref().unwrap().is_empty()).count(); - - assert_eq!(success_count, 20, "All concurrent writes should succeed"); - } - - /// 测试并发读取 - #[tokio::test] - async fn test_concurrent_reads() { - let ctx = Arc::new(test_utils::TestContext::new().await); - - let args = test_utils::StoreArgs { - content: "Shared content for concurrent reads".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: 
"user".to_string(), - user_id: Some("concurrent_read_user".to_string()), - agent_id: None, - }; - let result = ctx.store(args).await; - let uri = Arc::new(result.uri); - - let mut handles = vec![]; - - for _ in 0..50 { - let ctx_clone = ctx.clone(); - let uri_clone = uri.clone(); - let handle = tokio::spawn(async move { - ctx_clone.read(&uri_clone).await - }); - handles.push(handle); - } - - let results: Vec<_> = futures::future::join_all(handles).await; - let success_count = results.iter().filter(|r| r.is_ok() && r.as_ref().unwrap().is_ok()).count(); - - assert_eq!(success_count, 50, "All concurrent reads should succeed"); - } - - /// 测试并发读写 - #[tokio::test] - async fn test_concurrent_read_write() { - let ctx = Arc::new(test_utils::TestContext::new().await); - - for i in 0..5 { - ctx.add_message("rw_test", "user", &format!("Initial {}", i)).await; - } - - let mut handles: Vec>> = vec![]; - - for i in 0..20 { - let ctx_clone = ctx.clone(); - let handle = tokio::spawn(async move { - if i % 2 == 0 { - ctx_clone.add_message("rw_test", "user", &format!("Concurrent {}", i)).await; - Ok(()) - } else { - ctx_clone.list_sessions().await; - Ok(()) - } - }); - handles.push(handle); - } - - let results: Vec<_> = futures::future::join_all(handles).await; - let success_count = results.iter().filter(|r| r.is_ok()).count(); - - assert_eq!(success_count, 20, "All concurrent operations should succeed"); - } -} - -// ==================== 单元测试: 分层访问 ==================== - -mod unit_layer_access_tests { - use super::*; - - /// 测试 L0 abstract 获取 - #[tokio::test] - async fn test_get_abstract() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "Content for abstract testing. 
This should be summarized.".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("abstract_test_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - - let abstract_result = ctx.get_abstract(&result.uri).await.unwrap(); - assert_eq!(abstract_result.layer, "L0"); - assert!(!abstract_result.abstract_text.is_empty()); - } - - /// 测试 L1 overview 获取 - #[tokio::test] - async fn test_get_overview() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "Content for overview testing. This should be expanded into an overview.".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("overview_test_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - - let overview_result = ctx.get_overview(&result.uri).await.unwrap(); - assert_eq!(overview_result.layer, "L1"); - assert!(!overview_result.overview_text.is_empty()); - } - - /// 测试 L2 完整内容获取 - #[tokio::test] - async fn test_get_read() { - let ctx = test_utils::TestContext::new().await; - - let original_content = "Original content for L2 read test."; - - let args = test_utils::StoreArgs { - content: original_content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("read_test_user".to_string()), - agent_id: None, - }; - - let result = ctx.store(args).await; - - let read_result = ctx.get_read(&result.uri).await.unwrap(); - assert_eq!(read_result.layer, "L2"); - assert!(read_result.content.contains(original_content)); - } -} - -// ==================== 集成测试 (需要外部服务) ==================== - -mod integration_tests { - //! 集成测试 - 需要 Qdrant, LLM, Embedding 服务 - //! - //! 运行方式: - //! 1. 启动 Qdrant: docker run -p 6334:6334 qdrant/qdrant - //! 2. 
配置环境变量: LLM_API_BASE_URL, LLM_API_KEY, EMBEDDING_API_BASE_URL, EMBEDDING_API_KEY - //! 3. 运行: cargo test -- --ignored integration - - /// 测试向量搜索 (需要 Qdrant 和 Embedding 服务) - #[tokio::test] - #[ignore] - async fn integration_test_vector_search() { - println!("Integration test: vector_search - requires Qdrant and Embedding service"); - } - - /// 测试 LLM 记忆提取 (需要 LLM 服务) - #[tokio::test] - #[ignore] - async fn integration_test_llm_extraction() { - println!("Integration test: llm_extraction - requires LLM service"); - } - - /// 测试完整的存储和检索流程 (需要全部外部服务) - #[tokio::test] - #[ignore] - async fn integration_test_full_workflow() { - println!("Integration test: full_workflow - requires all external services"); - } -} - -// ==================== 性能测试 ==================== - -mod performance_tests { - use super::*; - use std::time::Instant; - - /// 测试存储性能 - #[tokio::test] - async fn test_storage_performance() { - let ctx = test_utils::TestContext::new().await; - - let start = Instant::now(); - - for i in 0..100 { - ctx.add_message("perf_test", "user", &format!("Performance test message {}", i)).await; - } - - let duration = start.elapsed(); - println!("Storage of 100 messages took: {:?}", duration); - - assert!(duration.as_secs() < 10, "Storage took too long: {:?}", duration); - } - - /// 测试读取性能 - #[tokio::test] - async fn test_read_performance() { - let ctx = test_utils::TestContext::new().await; - - let args = test_utils::StoreArgs { - content: "Performance test content for reading.".to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(false), - scope: "user".to_string(), - user_id: Some("read_perf_user".to_string()), - agent_id: None, - }; - let result = ctx.store(args).await; - - let start = Instant::now(); - - for _ in 0..100 { - ctx.read(&result.uri).await.unwrap(); - } - - let duration = start.elapsed(); - println!("100 reads took: {:?}", duration); - - assert!(duration.as_secs() < 10, "Reads took too long: {:?}", duration); - } - - /// 
测试列表性能 - #[tokio::test] - async fn test_list_performance() { - let ctx = test_utils::TestContext::new().await; - - for i in 0..50 { - ctx.add_message(&format!("list_perf_{}", i), "user", "message").await; - } - - let start = Instant::now(); - - for _ in 0..100 { - ctx.list_sessions().await; - } - - let duration = start.elapsed(); - println!("100 list operations took: {:?}", duration); - - assert!(duration.as_secs() < 10, "List operations took too long: {:?}", duration); - } -} \ No newline at end of file diff --git a/cortex-mem-tools/tests/llm_integration_tests.rs b/cortex-mem-tools/tests/llm_integration_tests.rs deleted file mode 100644 index 7e4ccaa..0000000 --- a/cortex-mem-tools/tests/llm_integration_tests.rs +++ /dev/null @@ -1,513 +0,0 @@ -//! Cortex-Mem LLM 集成测试 -//! -//! 这些测试需要有效的 LLM 配置才能运行 -//! 配置来源:config.toml 或环境变量 -//! -//! 注意:这些测试需要外部服务(Qdrant, LLM, Embedding),默认被忽略。 -//! 要运行这些测试,请使用:cargo test -- --ignored - -#![allow(dead_code)] - -// 下面的测试需要外部服务,暂时禁用 -// 新的 API 需要以下依赖: -// - Qdrant 向量数据库 -// - LLM 服务(OpenAI 兼容 API) -// - Embedding 服务 - -/* -use cortex_mem_tools::{MemoryOperations, types::*}; -use std::sync::Arc; -use tempfile::TempDir; - -/// 检查是否有 LLM 配置 -fn has_llm_config() -> bool { - // 先尝试从 config.toml 加载(从多个可能的位置查找) - if load_llm_config_from_file().is_some() { - return true; - } - - // 或者检查环境变量 - std::env::var("LLM_API_BASE_URL").is_ok() && - std::env::var("LLM_API_KEY").is_ok() -} - -/// 从 config.toml 解析 LLM 配置 -fn load_llm_config_from_file() -> Option { - // 尝试从多个位置查找 config.toml - let possible_paths = [ - "config.toml", // 当前目录 - "../config.toml", // 上级目录(从 cortex-mem-tools 运行时) - "../../config.toml", // 上两级目录 - ]; - - let mut content = None; - let mut found_path = ""; - - for path in &possible_paths { - if let Ok(c) = std::fs::read_to_string(path) { - content = Some(c); - found_path = path; - break; - } - } - - let content = content?; - - // 检查是否有 [llm] 段落 - if !content.contains("[llm]") { - println!("⚠️ config.toml 中没有 [llm] 配置段落"); - 
return None; - } - - // 简单解析 TOML - let mut api_base_url = None; - let mut api_key = None; - let mut model = Some("gpt-3.5-turbo".to_string()); - let mut temperature = Some(0.1f32); - let mut max_tokens = Some(4096u32); - - let mut in_llm_section = false; - for line in content.lines() { - let trimmed = line.trim(); - - // 跳过空行 - if trimmed.is_empty() { - continue; - } - - // 检测 [llm] 段落开始 - if trimmed == "[llm]" { - in_llm_section = true; - continue; - } - - // 检测其他段落开始(结束 [llm] 段落) - if trimmed.starts_with('[') && in_llm_section { - break; - } - - // 在 [llm] 段落内 - if in_llm_section { - // 跳过注释行(以 # 开头) - if trimmed.starts_with('#') { - continue; - } - - // 解析 key = "value" 格式 - if let Some(eq_pos) = trimmed.find('=') { - let key = trimmed[..eq_pos].trim(); - let value_part = trimmed[eq_pos + 1..].trim(); - - // 跳过注释掉的配置(key 以 # 开头) - if key.starts_with('#') { - continue; - } - - // 移除引号 - let value = value_part - .trim_matches('"') - .trim_matches('\'') - .to_string(); - - match key { - "api_base_url" => api_base_url = Some(value), - "api_key" => api_key = Some(value), - "model_efficient" | "model" => model = Some(value), - "temperature" => temperature = value.parse().ok(), - "max_tokens" => max_tokens = value.parse().ok(), - _ => {} - } - } - } - } - - // 检查是否获取了必需的配置 - let api_url = api_base_url?; - let key = api_key?; - - // 检查值是否为空 - if api_url.is_empty() || key.is_empty() { - println!("⚠️ config.toml 中的 api_base_url 或 api_key 为空"); - return None; - } - - Some(cortex_mem_core::llm::LLMConfig { - api_base_url: api_url, - api_key: key, - model_efficient: model?, - temperature: temperature?, - max_tokens: max_tokens? 
as usize, - }) -} - -/// 加载 LLM 配置 -fn load_llm_config() -> Option { - // 优先从 config.toml 加载 - if let Some(config) = load_llm_config_from_file() { - return Some(config); - } - - // 从环境变量加载 - if let (Ok(api_url), Ok(api_key)) = ( - std::env::var("LLM_API_BASE_URL"), - std::env::var("LLM_API_KEY"), - ) { - return Some(cortex_mem_core::llm::LLMConfig { - api_base_url: api_url, - api_key, - model_efficient: std::env::var("LLM_MODEL").unwrap_or_else(|_| "gpt-3.5-turbo".to_string()), - temperature: 0.1, - max_tokens: 4096, - }); - } - - None -} - -/// 创建带 LLM 的测试环境 -async fn setup_llm_test_env() -> Option<(TempDir, MemoryOperations)> { - if !has_llm_config() { - return None; - } - - let llm_config = load_llm_config()?; - let llm_client = Arc::new( - cortex_mem_core::llm::LLMClientImpl::new(llm_config).ok()? - ); - - let temp_dir = TempDir::new().unwrap(); - let ops = MemoryOperations::with_tenant_and_llm( - temp_dir.path().to_str().unwrap(), - "llm_test_tenant", - llm_client, - ).await.ok()?; - - Some((temp_dir, ops)) -} - -// ==================== LLM 功能测试 ==================== - -mod llm_layer_tests { - use super::*; - - /// 测试 LLM 生成的高质量 L0 摘要 - #[tokio::test] - async fn test_llm_l0_quality() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (_temp_dir, ops) = env.unwrap(); - - // 使用需要理解的内容 - let content = r#"# Rust 所有权系统 - -Rust 的所有权系统是其最独特的特性之一。 - -## 核心规则 - -1. 每个值都有一个所有者 -2. 同一时间只能有一个所有者 -3. 
当所有者离开作用域,值被丢弃 - -## 为什么重要 - -所有权让 Rust 能够在没有垃圾回收器的情况下保证内存安全,同时保持高性能。 - -## 实际应用 - -在系统编程、嵌入式开发、Web 后端等场景都有广泛应用。"#; - - let args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), // 启用 LLM 生成 - scope: "user".to_string(), - user_id: Some("llm_l0_test".to_string()), - agent_id: None, - }; - - let start = std::time::Instant::now(); - let result = ops.store(args).await.unwrap(); - let duration = start.elapsed(); - - println!("✅ LLM L0 生成耗时: {:?}", duration); - println!("📄 存储 URI: {}", result.uri); - - // 获取 L0 摘要 - let l0 = ops.get_abstract(&result.uri).await.unwrap(); - println!("📝 L0 摘要 ({} tokens): {}", l0.token_count, l0.abstract_text); - - // 验证 L0 质量(使用字符数而不是 token 数,因为中文 token 计算不准确) - let char_count = l0.abstract_text.chars().count(); - println!("📝 L0 字符数: {}", char_count); - assert!(char_count > 20, "LLM 生成的 L0 应该有实质内容 ({} 字符)", char_count); - assert!(char_count < 2000, "L0 应该相对简洁 ({} 字符)", char_count); - - // 验证包含关键信息(LLM 应该提取出关键概念) - let has_keywords = l0.abstract_text.contains("所有权") || - l0.abstract_text.contains("Rust") || - l0.abstract_text.contains("内存安全") || - l0.abstract_text.contains("owner") || - l0.abstract_text.contains("memory"); - assert!(has_keywords, "L0 应该包含关键主题词: {}", l0.abstract_text); - } - - /// 测试 LLM 生成的 L1 概览 - #[tokio::test] - async fn test_llm_l1_quality() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (_temp_dir, ops) = env.unwrap(); - - let content = r#"# OAuth 2.0 认证框架 - -OAuth 2.0 是一种授权框架,允许第三方应用获取对用户资源的有限访问权限。 - -## 授权模式 - -### 1. 授权码模式 -最安全、最常用的模式,适用于有后端的应用。 - -### 2. 简化模式 -适用于纯前端应用。 - -### 3. 密码凭证模式 -用户直接向客户端提供用户名密码。 - -### 4. 
客户端凭证模式 -用于服务器之间的通信。 - -## 安全考虑 - -- 使用 HTTPS -- 验证 redirect_uri -- 设置合理的令牌过期时间"#; - - let args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("llm_l1_test".to_string()), - agent_id: None, - }; - - let start = std::time::Instant::now(); - let result = ops.store(args).await.unwrap(); - let duration = start.elapsed(); - - println!("✅ LLM L1 生成耗时: {:?}", duration); - - // 获取 L1 概览 - let l1 = ops.get_overview(&result.uri).await.unwrap(); - println!("📝 L1 概览 ({} tokens):", l1.token_count); - println!("{}", l1.overview_text); - - // 验证 L1 结构 - assert!(l1.token_count > 50, "L1 应该有详细内容"); - assert!(l1.overview_text.contains("#"), "L1 应该包含 Markdown 标题"); - - // 验证 L1 有实质内容(LLM 生成的可能比原文长,因为会扩展解释) - assert!( - l1.token_count > 100, - "L1 ({} tokens) 应该有详细内容", - l1.token_count - ); - } - - /// 对比 Fallback 和 LLM 生成的质量差异 - #[tokio::test] - async fn test_llm_vs_fallback_quality() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (temp_dir, ops_with_llm) = env.unwrap(); - - // 创建不带 LLM 的版本 - let ops_fallback = MemoryOperations::from_data_dir( - temp_dir.path().to_str().unwrap() - ).await.unwrap(); - - let content = "Rust 是一种系统编程语言,专注于安全、并发和性能。它通过所有权系统在没有垃圾回收器的情况下保证内存安全。"; - - // LLM 版本 - let llm_args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some("llm_compare".to_string()), - agent_id: None, - }; - - let llm_result = ops_with_llm.store(llm_args).await.unwrap(); - let llm_l0 = ops_with_llm.get_abstract(&llm_result.uri).await.unwrap(); - - // Fallback 版本 - let fallback_args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: 
Some("fallback_compare".to_string()), - agent_id: None, - }; - - let fallback_result = ops_fallback.store(fallback_args).await.unwrap(); - let fallback_l0 = ops_fallback.get_abstract(&fallback_result.uri).await.unwrap(); - - println!("🤖 LLM L0 ({} tokens): {}", llm_l0.token_count, llm_l0.abstract_text); - println!("📋 Fallback L0 ({} tokens): {}", fallback_l0.token_count, fallback_l0.abstract_text); - - // LLM 版本通常更智能(不一定是更短,但应该更有信息量) - println!("\n📊 对比: LLM {} tokens vs Fallback {} tokens", - llm_l0.token_count, fallback_l0.token_count); - } -} - -mod llm_memory_extraction_tests { - use super::*; - - /// 测试通过 close_session_sync 触发 LLM 记忆提取(MemoryEventCoordinator 路径) - #[tokio::test] - async fn test_llm_memory_extraction() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (_temp_dir, ops) = env.unwrap(); - - // 创建一个模拟对话 - let thread_id = "extraction_test"; - ops.add_message(thread_id, "user", "我喜欢用 Rust 编程,因为它内存安全且性能高。").await.unwrap(); - ops.add_message(thread_id, "assistant", "是的,Rust 的所有权系统确实很独特。你还喜欢其他什么编程语言?").await.unwrap(); - ops.add_message(thread_id, "user", "我也喜欢 Python,适合快速原型开发。").await.unwrap(); - - // 同步关闭 session,MemoryEventCoordinator 完成记忆提取和 L0/L1 生成后才返回 - ops.close_session_sync(thread_id).await.ok(); - - println!("✅ 对话已关闭,LLM 记忆提取已同步完成"); - - // 验证 session 存在 - let session = ops.get_session(thread_id).await; - assert!(session.is_ok(), "Session 应该存在"); - } -} - -mod llm_performance_tests { - use super::*; - - /// 测试 LLM API 调用性能 - #[tokio::test] - async fn test_llm_api_performance() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (_temp_dir, ops) = env.unwrap(); - - let content = "这是一段测试内容,用于测量 LLM API 调用的时间。"; - - let start = std::time::Instant::now(); - - let args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: 
"user".to_string(), - user_id: Some("perf_test".to_string()), - agent_id: None, - }; - - let result = ops.store(args).await.unwrap(); - let duration = start.elapsed(); - - println!("⏱️ LLM 生成 L0/L1 总耗时: {:?}", duration); - println!("📄 URI: {}", result.uri); - - // 通常 LLM 调用需要 1-5 秒 - assert!(duration.as_secs() < 30, "LLM 生成应在 30 秒内完成"); - } - - /// 批量 LLM 生成测试 - #[tokio::test] - async fn test_batch_llm_generation() { - let env = setup_llm_test_env().await; - if env.is_none() { - println!("⚠️ 跳过测试:没有 LLM 配置"); - return; - } - - let (_temp_dir, ops) = env.unwrap(); - - let contents = vec![ - "Rust 所有权系统介绍...", - "OAuth 2.0 认证流程说明...", - "PostgreSQL 数据库优化技巧...", - ]; - - let start = std::time::Instant::now(); - - for (i, content) in contents.iter().enumerate() { - let args = StoreArgs { - content: content.to_string(), - thread_id: "".to_string(), - metadata: None, - auto_generate_layers: Some(true), - scope: "user".to_string(), - user_id: Some(format!("batch_user_{}", i)), - agent_id: None, - }; - - let result = ops.store(args).await.unwrap(); - println!("✅ 第 {} 个完成: {}", i + 1, result.uri); - } - - let duration = start.elapsed(); - println!("⏱️ 批量 {} 个 LLM 生成总耗时: {:?}", contents.len(), duration); - - // 批量生成可能需要更长时间(取决于 API 响应速度) - assert!(duration.as_secs() < 180, "批量 LLM 生成应在 3 分钟内完成"); - } -} - -// ==================== 使用说明 ==================== -// -// 运行这些测试需要配置 LLM API: -// -// 方式 1: 使用 config.toml(推荐) -// 确保项目根目录有 config.toml 且包含: -// [llm] -// api_base_url = "https://your-api-endpoint.com/v1" -// api_key = "your-api-key" -// model_efficient = "gpt-3.5-turbo" -// -// 方式 2: 使用环境变量 -// export LLM_API_BASE_URL="https://your-api-endpoint.com/v1" -// export LLM_API_KEY="your-api-key" -// export LLM_MODEL="gpt-3.5-turbo" -// -// 然后运行测试: -// cargo test -p cortex-mem-tools --test llm_integration_tests -- --ignored -// -// 如果没有配置,测试会自动跳过并显示警告 -*/ From f4ec5c0b39bd8e88d01d84563d59ee29a3a87342 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Tue, 10 Mar 2026 18:53:35 +0800 
Subject: [PATCH 10/20] Create cli_commands_test.rs --- cortex-mem-cli/tests/cli_commands_test.rs | 1197 +++++++++++++++++++++ 1 file changed, 1197 insertions(+) create mode 100644 cortex-mem-cli/tests/cli_commands_test.rs diff --git a/cortex-mem-cli/tests/cli_commands_test.rs b/cortex-mem-cli/tests/cli_commands_test.rs new file mode 100644 index 0000000..513426b --- /dev/null +++ b/cortex-mem-cli/tests/cli_commands_test.rs @@ -0,0 +1,1197 @@ +//! cortex-mem-cli 命令功能测试 +//! +//! # 测试分类 +//! +//! ## 1. 基础命令测试 (无需外部服务) +//! - `--help`:检验帮助信息输出 +//! - `--version`:检验版本号输出 +//! +//! ## 2. Tenant 命令测试 (仅需配置文件和本地文件系统) +//! - `tenant list`:列出租户,使用临时目录 +//! +//! ## 3. 参数验证测试 (无需外部服务) +//! - 缺少必要参数时的错误提示 +//! - 非法参数值的错误提示(如 min_score > 1.0) +//! - 非法 URI scheme 的错误提示 +//! +//! ## 4. 完整功能测试 (需要 Qdrant + LLM + Embedding,标记为 #[ignore]) +//! - `add`:添加消息 +//! - `list`:列出记忆 +//! - `get`:获取单条记忆 +//! - `delete`:删除记忆 +//! - `search`:语义搜索 +//! - `session list/create/close`:会话管理 +//! - `stats`:统计信息 +//! - `layers status/ensure-all/regenerate-oversized`:层文件管理 +//! +//! # 运行方式 +//! +//! ```bash +//! # 只运行不依赖外部服务的测试 +//! cargo test -p cortex-mem-cli +//! +//! # 运行全部测试(需要配置好 Qdrant + LLM + Embedding) +//! cargo test -p cortex-mem-cli -- --include-ignored +//! +//! # 通过环境变量指定配置 +//! CONFIG_PATH=/path/to/config.toml TENANT_ID=my-tenant cargo test -p cortex-mem-cli -- --include-ignored +//! 
``` + +use assert_cmd::Command; +use predicates::prelude::*; +use std::fs; +use tempfile::TempDir; + +// ─── 辅助函数 ──────────────────────────────────────────────────────────────── + +/// 获取 cortex-mem CLI 命令 +fn cli() -> Command { + // 使用 Command::new + CARGO_BIN_EXE 方式,兼容自定义 build-dir + Command::new(env!("CARGO_BIN_EXE_cortex-mem")) +} + +/// 从环境变量读取配置路径,如未设置则使用 workspace 根目录的 config.toml +/// +/// 注意:cargo test 的工作目录是 crate 目录(cortex-mem-cli/),而非 workspace 根目录。 +/// 因此需要将相对路径解析为基于 CARGO_MANIFEST_DIR 父目录的绝对路径。 +fn config_path() -> String { + match std::env::var("CONFIG_PATH") { + Ok(p) => { + // 环境变量提供的路径:如果是相对路径,则相对于 workspace 根目录(CARGO_MANIFEST_DIR 的父目录) + let path = std::path::Path::new(&p); + if path.is_absolute() { + p + } else { + // CARGO_MANIFEST_DIR = cortex-mem-cli/,其父目录 = workspace 根目录 + let workspace_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .expect("CARGO_MANIFEST_DIR has no parent"); + workspace_root.join(path).to_string_lossy().to_string() + } + } + Err(_) => { + // 默认使用 workspace 根目录下的 config.toml + let workspace_root = std::path::Path::new(env!("CARGO_MANIFEST_DIR")) + .parent() + .expect("CARGO_MANIFEST_DIR has no parent"); + workspace_root + .join("config.toml") + .to_string_lossy() + .to_string() + } + } +} + +/// 从环境变量读取 Tenant ID,如未设置则使用默认值 +fn tenant_id() -> String { + std::env::var("TENANT_ID") + .unwrap_or_else(|_| "default".to_string()) +} + +/// 创建临时数据目录,并在其中生成一个最简配置文件(不含外部服务配置) +/// 该函数用于不需要向量数据库的测试场景 +fn setup_temp_env() -> (TempDir, String) { + let tmp = TempDir::new().expect("Failed to create temp dir"); + let data_dir = tmp.path().join("cortex-data"); + fs::create_dir_all(&data_dir).expect("Failed to create data dir"); + + // 创建 tenants 目录结构(用于 tenant list 测试) + let tenants_dir = data_dir.join("tenants"); + fs::create_dir_all(&tenants_dir).expect("Failed to create tenants dir"); + fs::create_dir_all(tenants_dir.join("tenant-alpha")).expect("Failed to create tenant dir"); + 
fs::create_dir_all(tenants_dir.join("tenant-beta")).expect("Failed to create tenant dir"); + + // 生成最小化 config.toml(包含所有必需字段,但 URL 指向本地不存在的服务) + let config_content = format!( + r#"[qdrant] +url = "http://localhost:16334" +collection_name = "test-cortex-mem" +embedding_dim = 256 +timeout_secs = 5 +api_key = "" + +[embedding] +api_base_url = "http://localhost:18080" +api_key = "test-key" +model_name = "test-model" +batch_size = 10 +timeout_secs = 5 + +[llm] +api_base_url = "http://localhost:18080" +api_key = "test-key" +model_efficient = "test-model" +temperature = 0.1 +max_tokens = 4096 + +[server] +host = "127.0.0.1" +port = 3000 +cors_origins = ["*"] + +[cortex] +data_dir = "{data_dir}" + +[logging] +enabled = false +log_directory = "logs" +level = "error" +"#, + data_dir = data_dir.display() + ); + + let config_path = tmp.path().join("config.toml"); + fs::write(&config_path, &config_content).expect("Failed to write config file"); + + let config_str = config_path.to_string_lossy().to_string(); + (tmp, config_str) +} + +// ─── 1. 
基础命令测试 ───────────────────────────────────────────────────────── + +/// B01: --help 输出应包含程序名称和使用说明 +#[test] +fn test_help_command() { + cli() + .arg("--help") + .assert() + .success() + .stdout(predicate::str::contains("cortex-mem")) + .stdout(predicate::str::contains("Usage")); +} + +/// B02: --version 输出应包含二进制名称 +#[test] +fn test_version_command() { + cli() + .arg("--version") + .assert() + .success() + .stdout(predicate::str::contains("cortex-mem")); +} + +/// B03: add 子命令的 --help 应包含参数说明 +#[test] +fn test_add_subcommand_help() { + cli() + .args(["add", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("thread").or(predicate::str::contains("content"))); +} + +/// B04: search 子命令的 --help 应包含查询参数说明 +#[test] +fn test_search_subcommand_help() { + cli() + .args(["search", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("query").or(predicate::str::contains("limit"))); +} + +/// B05: session 子命令的 --help 应包含子命令说明 +#[test] +fn test_session_subcommand_help() { + cli() + .args(["session", "--help"]) + .assert() + .success() + .stdout( + predicate::str::contains("list") + .or(predicate::str::contains("create")) + .or(predicate::str::contains("close")), + ); +} + +/// B06: layers 子命令的 --help 应包含子命令说明 +#[test] +fn test_layers_subcommand_help() { + cli() + .args(["layers", "--help"]) + .assert() + .success() + .stdout( + predicate::str::contains("status") + .or(predicate::str::contains("ensure-all")) + .or(predicate::str::contains("regenerate-oversized")), + ); +} + +/// B07: tenant 子命令的 --help 应包含 list 子命令 +#[test] +fn test_tenant_subcommand_help() { + cli() + .args(["tenant", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("list")); +} + +/// B08: get 子命令的 --help 应包含 URI 参数说明 +#[test] +fn test_get_subcommand_help() { + cli() + .args(["get", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("uri").or(predicate::str::contains("URI"))); +} + +/// B09: list 子命令的 --help 应包含相关参数说明 +#[test] +fn 
test_list_subcommand_help() { + cli() + .args(["list", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("uri").or(predicate::str::contains("URI"))); +} + +/// B10: delete 子命令的 --help 应包含 URI 参数说明 +#[test] +fn test_delete_subcommand_help() { + cli() + .args(["delete", "--help"]) + .assert() + .success() + .stdout(predicate::str::contains("uri").or(predicate::str::contains("URI"))); +} + +/// B11: stats 子命令的 --help 应成功 +#[test] +fn test_stats_subcommand_help() { + cli() + .args(["stats", "--help"]) + .assert() + .success(); +} + +// ─── 2. Tenant 命令测试 ────────────────────────────────────────────────────── + +/// T01: tenant list 在有租户目录时应列出所有租户 +#[test] +fn test_tenant_list_with_tenants() { + let (_tmp, config) = setup_temp_env(); + + cli() + .args(["-c", &config, "tenant", "list"]) + .assert() + .success() + .stdout(predicate::str::contains("tenant-alpha")) + .stdout(predicate::str::contains("tenant-beta")); +} + +/// T02: tenant list 在无租户目录时应给出友好提示 +#[test] +fn test_tenant_list_empty() { + let tmp = TempDir::new().expect("Failed to create temp dir"); + let data_dir = tmp.path().join("cortex-data-empty"); + fs::create_dir_all(&data_dir).expect("Failed to create data dir"); + + let config_content = format!( + r#"[qdrant] +url = "http://localhost:16334" +collection_name = "test-cortex-mem" +embedding_dim = 256 +timeout_secs = 5 +api_key = "" + +[embedding] +api_base_url = "http://localhost:18080" +api_key = "test-key" +model_name = "test-model" +batch_size = 10 +timeout_secs = 5 + +[llm] +api_base_url = "http://localhost:18080" +api_key = "test-key" +model_efficient = "test-model" +temperature = 0.1 +max_tokens = 4096 + +[server] +host = "127.0.0.1" +port = 3000 +cors_origins = ["*"] + +[cortex] +data_dir = "{data_dir}" + +[logging] +enabled = false +log_directory = "logs" +level = "error" +"#, + data_dir = data_dir.display() + ); + + let config_path = tmp.path().join("config.toml"); + fs::write(&config_path, &config_content).expect("Failed to 
write config"); + + cli() + .args(["-c", &config_path.to_string_lossy(), "tenant", "list"]) + .assert() + .success() + .stdout(predicate::str::contains("No tenants").or(predicate::str::contains("found"))); +} + +/// T03: tenant list 使用不存在的配置文件时应以错误退出 +#[test] +fn test_tenant_list_missing_config() { + cli() + .args(["-c", "/nonexistent/path/config.toml", "tenant", "list"]) + .assert() + .failure(); +} + +// ─── 3. 参数验证测试 ───────────────────────────────────────────────────────── + +/// V01: add 命令缺少必要参数 --thread 时应以错误退出 +#[test] +fn test_add_missing_thread_arg() { + cli() + .args(["add", "some content"]) + .assert() + .failure(); +} + +/// V02: add 命令缺少 content 位置参数时应以错误退出 +#[test] +fn test_add_missing_content_arg() { + cli() + .args(["add", "--thread", "my-thread"]) + .assert() + .failure(); +} + +/// V03: search 命令缺少 query 位置参数时应以错误退出 +#[test] +fn test_search_missing_query_arg() { + cli() + .args(["search"]) + .assert() + .failure(); +} + +/// V04: get 命令缺少 URI 位置参数时应以错误退出 +#[test] +fn test_get_missing_uri_arg() { + cli() + .args(["get"]) + .assert() + .failure(); +} + +/// V05: delete 命令缺少 URI 位置参数时应以错误退出 +#[test] +fn test_delete_missing_uri_arg() { + cli() + .args(["delete"]) + .assert() + .failure(); +} + +/// V06: session create 缺少 thread 参数时应以错误退出 +#[test] +fn test_session_create_missing_thread() { + cli() + .args(["session", "create"]) + .assert() + .failure(); +} + +/// V07: session close 缺少 thread 参数时应以错误退出 +#[test] +fn test_session_close_missing_thread() { + cli() + .args(["session", "close"]) + .assert() + .failure(); +} + +/// V08: search 命令的 --min-score 参数超出范围(>1.0)应以错误退出 +/// 注意:参数验证发生在 MemoryOperations 初始化之后,因此该测试需要外部服务 +#[test] +#[ignore = "参数验证位于 MemoryOperations 初始化之后,需要外部服务才能到达验证逻辑"] +fn test_search_invalid_min_score_over_limit() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "test query", + "-s", + "2.0", + ]) + .assert() + .failure() + 
.stderr(predicate::str::contains("min_score must be between").or( + predicate::str::contains("between 0.0 and 1.0"), + )); +} + +/// V09: get 命令使用无效的 URI scheme 时应以错误退出 +/// 注意:URI 验证发生在 MemoryOperations 初始化之后,因此该测试需要外部服务 +#[test] +#[ignore = "URI 验证位于 MemoryOperations 初始化之后,需要外部服务才能到达验证逻辑"] +fn test_get_invalid_uri_scheme() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "get", + "http://invalid-scheme/path", + ]) + .assert() + .failure() + .stderr(predicate::str::contains("Invalid URI scheme").or( + predicate::str::contains("invalid").or(predicate::str::contains("error")), + )); +} + +/// V10: list 命令不带任何选项时应以错误退出(需要配置才能初始化 MemoryOperations) +/// 这验证了配置文件缺失时的错误处理 +#[test] +fn test_list_no_config_fails() { + cli() + .args(["-c", "/tmp/nonexistent_config_xyzabc.toml", "list"]) + .assert() + .failure(); +} + +// ─── 4. 完整功能测试 (需要外部服务,标记 #[ignore]) ───────────────────────── +// +// 运行方式: +// CONFIG_PATH=/path/to/config.toml TENANT_ID= \ +// cargo test -p cortex-mem-cli -- --include-ignored +// +// 环境变量: +// CONFIG_PATH - 配置文件路径 (默认: config.toml) +// TENANT_ID - 租户 ID (默认: default) + +/// F01: add 命令 - 添加用户消息后应打印成功信息和 URI +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_add_user_message() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-add-{}", uuid_short()); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "--role", + "user", + "Hello, this is a test message from cli test", + ]) + .assert() + .success() + .stdout(predicate::str::contains("successfully").or(predicate::str::contains("✓"))) + .stdout(predicate::str::contains("cortex://session")); +} + +/// F02: add 命令 - 添加助手消息后应打印成功信息 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_add_assistant_message() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = 
format!("cli-test-add-asst-{}", uuid_short()); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "--role", + "assistant", + "This is an assistant response for testing", + ]) + .assert() + .success() + .stdout(predicate::str::contains("successfully").or(predicate::str::contains("✓"))); +} + +/// F03: list 命令 - 列出默认 URI (cortex://session) +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_list_default_uri() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args(["-c", &config, "--tenant", &tenant, "list"]) + .assert() + .success(); +} + +/// F04: list 命令 - 列出 user 维度 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_list_user_dimension() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "list", + "--uri", + "cortex://user", + ]) + .assert() + .success() + .stdout(predicate::str::contains("Found").or(predicate::str::contains("No memories"))); +} + +/// F05: list 命令 - 添加消息后列出该会话的内容 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_list_after_add() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-list-{}", uuid_short()); + + // 先添加一条消息 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "Test message for list verification", + ]) + .assert() + .success(); + + // 然后列出该会话 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "list", + "--uri", + &format!("cortex://session/{}", thread_id), + ]) + .assert() + .success() + .stdout(predicate::str::contains("Found").or(predicate::str::contains("item"))); +} + +/// F06: get 命令 - 先 add 再 get 该 URI 的内容 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_get_after_add() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = 
format!("cli-test-get-{}", uuid_short()); + let unique_content = format!("Unique test content {}", uuid_short()); + + // 先添加消息 + let add_output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + &unique_content, + ]) + .assert() + .success() + .get_output() + .stdout + .clone(); + + // 从输出中提取 URI(形如 cortex://session/...) + let output_str = String::from_utf8_lossy(&add_output); + let uri = extract_uri_from_output(&output_str); + + if let Some(uri) = uri { + // 使用 get 获取内容 + cli() + .args(["-c", &config, "--tenant", &tenant, "get", &uri]) + .assert() + .success() + .stdout(predicate::str::contains(unique_content)); + } else { + // URI 提取失败时直接通过(不阻塞 CI) + println!("WARN: Could not extract URI from add output, skipping get check"); + } +} + +/// F07: get 命令 - --abstract-only 选项应返回 L0 层内容 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_get_abstract_only() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-abstract-{}", uuid_short()); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "Content to test abstract layer retrieval", + ]) + .assert() + .success(); + + // 列出会话以获取 URI + let list_output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "list", + "--uri", + &format!("cortex://session/{}", thread_id), + ]) + .assert() + .success() + .get_output() + .stdout + .clone(); + + let output_str = String::from_utf8_lossy(&list_output); + println!("List output: {}", output_str); + // 注:只验证命令能正常执行,具体内容由 L0 层生成逻辑决定 +} + +/// F08: delete 命令 - 先 add 再 delete 应成功 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_delete_after_add() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-delete-{}", uuid_short()); + + // 先添加消息并获取 URI + let add_output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + 
"--thread", + &thread_id, + "Message to be deleted", + ]) + .assert() + .success() + .get_output() + .stdout + .clone(); + + let output_str = String::from_utf8_lossy(&add_output); + let uri = extract_uri_from_output(&output_str); + + if let Some(uri) = uri { + // 删除该 URI + cli() + .args(["-c", &config, "--tenant", &tenant, "delete", &uri]) + .assert() + .success() + .stdout( + predicate::str::contains("deleted").or(predicate::str::contains("successfully")), + ); + } else { + println!("WARN: Could not extract URI from add output, skipping delete check"); + } +} + +/// F09: search 命令 - 基本搜索 +/// +/// search 命令需要调用 Embedding API 将查询转为向量,若 Embedding 服务不可达则会失败。 +/// 本测试验证命令能够正常执行(参数解析正确),对 Embedding 服务的可用性不做强依赖要求。 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_search_basic() { + let config = config_path(); + let tenant = tenant_id(); + + let output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "test query", + ]) + .output() + .expect("Failed to run command"); + + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + + // 如果成功:stdout 中应有 "Found" 或 "results" + // 如果失败:允许因 Embedding 服务不可达而失败(网络/服务错误),但不应是参数校验失败 + if output.status.success() { + assert!( + stdout.contains("Found") || stdout.contains("results") || stdout.contains("0 results"), + "Expected search result output, got: {}", + stdout + ); + } else { + // 允许因网络/服务不可用而失败,但不应是命令解析错误 + let is_network_or_service_error = stderr.contains("Embedding error") + || stderr.contains("HTTP request failed") + || stderr.contains("connection refused") + || stderr.contains("Vector store error") + || stderr.contains("tonic::transport"); + assert!( + is_network_or_service_error, + "Unexpected failure (not a network/service error): stderr={}", + stderr + ); + println!("INFO: search failed due to service unavailability (acceptable): {}", stderr.trim()); + } +} + +/// F10: search 命令 - 指定 limit 和 min_score 参数 +#[test] 
+#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_search_with_options() { + let config = config_path(); + let tenant = tenant_id(); + + let output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "test query", + "--limit", + "5", + "--min-score", + "0.5", + ]) + .output() + .expect("Failed to run command"); + + let stderr = String::from_utf8_lossy(&output.stderr); + // 成功或因服务不可达失败均可接受 + if !output.status.success() { + let is_service_error = stderr.contains("Embedding error") + || stderr.contains("HTTP request failed") + || stderr.contains("connection refused") + || stderr.contains("Vector store error"); + assert!(is_service_error, "Unexpected failure: {}", stderr); + } +} + +/// F11: search 命令 - 指定 scope 为 user 维度 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_search_user_scope() { + let config = config_path(); + let tenant = tenant_id(); + + let output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "user preference query", + "--scope", + "user", + ]) + .output() + .expect("Failed to run command"); + + let stderr = String::from_utf8_lossy(&output.stderr); + if !output.status.success() { + let is_service_error = stderr.contains("Embedding error") + || stderr.contains("HTTP request failed") + || stderr.contains("connection refused") + || stderr.contains("Vector store error"); + assert!(is_service_error, "Unexpected failure: {}", stderr); + } +} + +/// F12: search 命令 - 指定 --thread 限制在某个会话内搜索 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_search_in_thread() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-search-{}", uuid_short()); + + // 先添加内容 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "Rust programming language features", + ]) + .assert() + .success(); + + // 在该 thread 内搜索(允许因 Embedding 服务不可达而失败) + let search_output = cli() + 
.args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "Rust", + "--thread", + &thread_id, + ]) + .output() + .expect("Failed to run search command"); + + let search_stderr = String::from_utf8_lossy(&search_output.stderr); + if !search_output.status.success() { + let is_service_error = search_stderr.contains("Embedding error") + || search_stderr.contains("HTTP request failed") + || search_stderr.contains("connection refused") + || search_stderr.contains("Vector store error"); + assert!(is_service_error, "Unexpected search failure: {}", search_stderr); + println!("INFO: search in thread failed due to service unavailability: {}", search_stderr.trim()); + } +} + +/// F13: session list 命令 - 应列出会话(可能为空) +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_session_list() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args(["-c", &config, "--tenant", &tenant, "session", "list"]) + .assert() + .success() + .stdout( + predicate::str::contains("sessions") + .or(predicate::str::contains("No sessions")) + .or(predicate::str::contains("Found")), + ); +} + +/// F14: session create 命令 - 创建新会话 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_session_create() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-session-create-{}", uuid_short()); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "session", + "create", + &thread_id, + ]) + .assert() + .success() + .stdout(predicate::str::contains("created").or(predicate::str::contains("✓"))); +} + +/// F15: session create 命令 - 指定 --title 选项 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_session_create_with_title() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-session-titled-{}", uuid_short()); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "session", + "create", + &thread_id, + "--title", 
+ "My Test Session", + ]) + .assert() + .success() + .stdout(predicate::str::contains("created").or(predicate::str::contains("✓"))); +} + +/// F16: session close 命令 - 先 create 再 close +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_session_close_after_create() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-session-close-{}", uuid_short()); + + // 创建会话并添加消息 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + "A test message before closing session", + ]) + .assert() + .success(); + + // 关闭会话(触发记忆提取流水线) + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "session", + "close", + &thread_id, + ]) + .assert() + .success() + .stdout(predicate::str::contains("closed").or(predicate::str::contains("completed"))); +} + +/// F17: stats 命令 - 统计信息应包含维度数据 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_stats() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args(["-c", &config, "--tenant", &tenant, "stats"]) + .assert() + .success() + .stdout(predicate::str::contains("Statistics").or(predicate::str::contains("Sessions"))); +} + +/// F18: layers status 命令 - 显示 L0/L1 文件覆盖状态 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_layers_status() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args(["-c", &config, "--tenant", &tenant, "layers", "status"]) + .assert() + .success() + .stdout( + predicate::str::contains("Layer file status") + .or(predicate::str::contains("Total directories")), + ); +} + +/// F19: layers ensure-all 命令 - 为所有缺失目录生成 L0/L1 文件 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_layers_ensure_all() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args(["-c", &config, "--tenant", &tenant, "layers", "ensure-all"]) + .assert() + .success() + 
.stdout(predicate::str::contains("Statistics").or(predicate::str::contains("Generated"))); +} + +/// F20: layers regenerate-oversized 命令 - 重新生成超大 .abstract 文件 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_layers_regenerate_oversized() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "layers", + "regenerate-oversized", + ]) + .assert() + .success() + .stdout( + predicate::str::contains("Statistics") + .or(predicate::str::contains("Oversized")) + .or(predicate::str::contains("All .abstract files")), + ); +} + +/// F21: verbose 模式 - --verbose 选项不应导致命令失败 +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_verbose_mode() { + let config = config_path(); + let tenant = tenant_id(); + + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "--verbose", + "session", + "list", + ]) + .assert() + .success(); +} + +/// F22: 完整工作流 - add → list → get → search → delete +#[test] +#[ignore = "需要外部服务 (Qdrant + LLM + Embedding),请配置环境变量后运行"] +fn test_full_workflow() { + let config = config_path(); + let tenant = tenant_id(); + let thread_id = format!("cli-test-workflow-{}", uuid_short()); + let content = format!("Workflow test content {}", uuid_short()); + + // Step 1: add 消息 + let add_output = cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "add", + "--thread", + &thread_id, + &content, + ]) + .assert() + .success() + .get_output() + .stdout + .clone(); + + let output_str = String::from_utf8_lossy(&add_output); + println!("Add output: {}", output_str); + + // Step 2: list 该会话内容 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "list", + "--uri", + &format!("cortex://session/{}", thread_id), + ]) + .assert() + .success(); + + // Step 3: search 刚添加的内容 + cli() + .args([ + "-c", + &config, + "--tenant", + &tenant, + "search", + "Workflow test", + "--thread", + &thread_id, + ]) + .assert() + .success(); + + // Step 4: 
stats 查看统计 + cli() + .args(["-c", &config, "--tenant", &tenant, "stats"]) + .assert() + .success(); +} + +// ─── 辅助函数 ──────────────────────────────────────────────────────────────── + +/// 生成短 UUID(8 个字符)用于测试隔离 +fn uuid_short() -> String { + use std::collections::hash_map::DefaultHasher; + use std::hash::{Hash, Hasher}; + use std::time::SystemTime; + + let mut hasher = DefaultHasher::new(); + SystemTime::now().hash(&mut hasher); + std::thread::current().id().hash(&mut hasher); + format!("{:08x}", hasher.finish()) +} + +/// 从命令输出中提取第一个 `cortex://` URI +fn extract_uri_from_output(output: &str) -> Option { + output + .lines() + .find_map(|line| { + if let Some(pos) = line.find("cortex://") { + // 截取到空白字符或行末 + let uri_start = &line[pos..]; + let uri_end = uri_start + .find(|c: char| c.is_whitespace()) + .unwrap_or(uri_start.len()); + Some(uri_start[..uri_end].to_string()) + } else { + None + } + }) +} From 47fc8ad46537c754f9ca771eb371161e2122e693 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Tue, 10 Mar 2026 19:22:40 +0800 Subject: [PATCH 11/20] docs update --- README.md | 58 +- README_zh.md | 60 +- cortex-mem-cli/README.md | 34 +- cortex-mem-core/README.md | 40 + cortex-mem-mcp/README.md | 8 +- cortex-mem-service/README.md | 14 +- .../1_embedding_api_latency_optimization.md | 772 ------------- .../2_entity_query_recall_optimization.md | 1022 ----------------- .../3_memory_forgetting_mechanism.md | 863 -------------- docs_plan_v2.6/4_dynamic_weight_allocation.md | 929 --------------- litho.docs/en/1.Overview.md | 2 +- litho.docs/en/2.Architecture.md | 5 +- ...71\347\233\256\346\246\202\350\277\260.md" | 2 +- ...66\346\236\204\346\246\202\350\247\210.md" | 5 +- 14 files changed, 160 insertions(+), 3654 deletions(-) delete mode 100644 docs_plan_v2.6/1_embedding_api_latency_optimization.md delete mode 100644 docs_plan_v2.6/2_entity_query_recall_optimization.md delete mode 100644 docs_plan_v2.6/3_memory_forgetting_mechanism.md delete mode 100644 
docs_plan_v2.6/4_dynamic_weight_allocation.md diff --git a/README.md b/README.md index 51de240..100cffd 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,8 @@ cortex://resources/{resource_name}/ - Multi-Tenancy Support: Isolated memory spaces for different users and agents within a single deployment via tenant-aware collection naming. - Event-Driven Automation: File watchers and auto-indexers for background processing, synchronization, and profile enrichment. - LLM Result Caching: Intelligent caching with LRU eviction and TTL expiration reduces redundant LLM API calls by 50-75%, with cascade layer debouncing for 70-90% reduction in layer updates. +- Incremental Memory Updates: Introduced an event-driven incremental update system (`MemoryEventCoordinator`, `CascadeLayerUpdater`) that keeps L0/L1 layers in sync automatically as memories change. +- Memory Forgetting Mechanism: Introduced `MemoryCleanupService` based on the Ebbinghaus forgetting curve — automatically archives or deletes low-strength memories to control storage growth in long-running agents. - Agent Framework Integration: Built-in support for Rig framework and Model Context Protocol (MCP). - Web Dashboard: Svelte 5 SPA (Insights) for monitoring, tenant management, and semantic search visualization. @@ -232,9 +234,9 @@ graph TD Core --> LLM ``` -- `cortex-mem-core`: The heart of the system. Contains business logic for filesystem abstraction (`cortex://` URI), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation (L0/L1/L2), extraction engine, search engine, and automation orchestrator. -- `cortex-mem-service`: High-performance REST API server (Axum-based) exposing all memory operations via `/api/v2/*` endpoints. -- `cortex-mem-cli`: Command-line tool for developers and administrators to interact with the memory store directly. +- `cortex-mem-core`: The heart of the system. 
Contains business logic for filesystem abstraction (`cortex://` URI), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation (L0/L1/L2), extraction engine, search engine, automation orchestrator, and incremental update system (`MemoryEventCoordinator`, `CascadeLayerUpdater`, `LlmResultCache`, `IncrementalMemoryUpdater`) as well as forgetting mechanism (`MemoryCleanupService`). +- `cortex-mem-service`: High-performance REST API server (Axum-based) exposing all memory operations via `/api/v2/*` endpoints. Runs on port 8085 by default. +- `cortex-mem-cli`: Command-line tool (`cortex-mem` binary) for developers and administrators to interact with the memory store directly. - `cortex-mem-insights`: Pure frontend Svelte 5 SPA for monitoring, analytics, and memory management through a web interface. - `cortex-mem-mcp`: Model Context Protocol server for integration with AI assistants (Claude Desktop, Cursor, etc.). - `cortex-mem-rig`: Integration layer with the rig-core agent framework for tool registration. @@ -460,47 +462,48 @@ The CLI provides a powerful interface for direct interaction with the memory sys Adds a new message to a session thread, automatically storing it in the memory system. ```sh -cortex-mem-cli --config config.toml --tenant acme add --thread thread-123 --role user --content "The user is interested in Rust programming." +cortex-mem --config config.toml --tenant acme add --thread thread-123 --role user "The user is interested in Rust programming." ``` - `--thread `: (Required) The thread/session ID. - `--role `: Message role (user/assistant/system). Default: "user" -- `--content `: The text content of the message. +- `content`: The text content of the message (positional argument). #### Search for Memories Performs a semantic vector search across the memory store with weighted L0/L1/L2 scoring. ```sh -cortex-mem-cli --config config.toml --tenant acme search "what are the user's hobbies?" 
--thread thread-123 --limit 10 +cortex-mem --config config.toml --tenant acme search "what are the user's hobbies?" --thread thread-123 --limit 10 ``` - `query`: The natural language query for the search. - `--thread `: Filter memories by thread ID. -- `--limit `: Maximum number of results. Default: 10 -- `--min-score `: Minimum relevance score (0.0-1.0). Default: 0.3 +- `--limit ` / `-n`: Maximum number of results. Default: 10 +- `--min-score ` / `-s`: Minimum relevance score (0.0-1.0). Default: 0.4 - `--scope `: Search scope: "session", "user", or "agent". Default: "session" #### List Memories Retrieves a list of memories from a specific URI path. ```sh -cortex-mem-cli --config config.toml --tenant acme list --uri "cortex://session" --include-abstracts +cortex-mem --config config.toml --tenant acme list --uri "cortex://session" --include-abstracts ``` -- `--uri `: URI path to list (e.g., "cortex://session" or "cortex://user/preferences"). +- `--uri ` / `-u`: URI path to list (e.g., "cortex://session" or "cortex://user/preferences"). Default: `cortex://session` - `--include-abstracts`: Include L0 abstracts in results. #### Get a Specific Memory Retrieves a specific memory by its URI. ```sh -cortex-mem-cli --config config.toml --tenant acme get "cortex://session/thread-123/memory-456.md" +cortex-mem --config config.toml --tenant acme get "cortex://session/thread-123/memory-456.md" ``` - `uri`: The memory URI. -- `--abstract-only`: Show L0 abstract instead of full content. +- `--abstract-only` / `-a`: Show L0 abstract instead of full content. +- `--overview` / `-o`: Show L1 overview instead of full content. #### Delete a Memory Removes a memory from the store by its URI. ```sh -cortex-mem-cli --config config.toml --tenant acme delete "cortex://session/thread-123/memory-456.md" +cortex-mem --config config.toml --tenant acme delete "cortex://session/thread-123/memory-456.md" ``` #### Session Management @@ -508,30 +511,33 @@ Manage conversation sessions. 
```sh # List all sessions -cortex-mem-cli --config config.toml --tenant acme session list +cortex-mem --config config.toml --tenant acme session list # Create a new session -cortex-mem-cli --config config.toml --tenant acme session create thread-456 --title "My Session" +cortex-mem --config config.toml --tenant acme session create thread-456 --title "My Session" -# Close a session (triggers extraction) -cortex-mem-cli --config config.toml --tenant acme session close thread-456 +# Close a session (triggers extraction, layer generation, and vector indexing) +cortex-mem --config config.toml --tenant acme session close thread-456 ``` -#### Sync, Layers, and Stats -Synchronize filesystem with vector store, manage layer files, and display system statistics. +#### Layers and Stats +Manage layer files and display system statistics. ```sh # Display system statistics -cortex-mem-cli --config config.toml --tenant acme stats +cortex-mem --config config.toml --tenant acme stats # List available tenants -cortex-mem-cli --config config.toml tenant list +cortex-mem --config config.toml tenant list # Show L0/L1 layer file coverage status -cortex-mem-cli --config config.toml --tenant acme layers status +cortex-mem --config config.toml --tenant acme layers status # Generate missing L0/L1 layer files -cortex-mem-cli --config config.toml --tenant acme layers ensure-all +cortex-mem --config config.toml --tenant acme layers ensure-all + +# Regenerate oversized L0 abstract files (> 2K characters) +cortex-mem --config config.toml --tenant acme layers regenerate-oversized ``` ### REST API (`cortex-mem-service`) @@ -540,11 +546,11 @@ The REST API allows you to integrate Cortex Memory into any application, regardl #### Starting the Service ```sh -# Start the API server with default settings -cortex-mem-service --data-dir ./cortex-data --host 127.0.0.1 --port 8085 +# Start the API server with default settings (port 8085) +cortex-mem-service --config config.toml --host 127.0.0.1 --port 8085 # 
Enable verbose logging -cortex-mem-service -d ./cortex-data -h 127.0.0.1 -p 8085 --verbose +cortex-mem-service --config config.toml -h 127.0.0.1 -p 8085 --verbose ``` #### API Endpoints diff --git a/README_zh.md b/README_zh.md index 0afba2e..d6b8165 100644 --- a/README_zh.md +++ b/README_zh.md @@ -112,6 +112,8 @@ cortex://resources/{resource_name}/ - 多租户支持: 通过租户感知集合命名,在单个部署中为不同用户和代理提供隔离的内存空间。 - 事件驱动自动化: 文件监视器和自动索引器用于后台处理、同步和配置文件丰富。 - LLM结果缓存: 智能缓存采用LRU淘汰和TTL过期机制,减少50-75%的冗余LLM API调用,级联层防抖可减少70-90%的层更新调用。 +- 增量记忆更新: 引入了事件驱动的增量更新系统(`MemoryEventCoordinator`、`CascadeLayerUpdater`),在记忆变更时自动保持 L0/L1 层级同步。 +- 记忆遗忘机制: 引入了基于 Ebbinghaus 遗忘曲线的 `MemoryCleanupService`——自动归档或删除低强度记忆,控制长期运行 Agent 的存储空间膨胀。 - 代理框架集成: 内置支持Rig框架和模型上下文协议(MCP)。 - Web仪表板: Svelte 5 SPA(Insights)用于监控、租户管理和语义搜索可视化。 @@ -233,9 +235,9 @@ graph TD Core --> LLM ``` -- `cortex-mem-core`:系统的核心。包含文件系统抽象(`cortex://` URI)、LLM客户端包装器、嵌入生成、Qdrant集成、会话管理、层生成(L0/L1/L2)、提取引擎、搜索引擎和自动化编排器的业务逻辑。 -- `cortex-mem-service`:高性能REST API服务器(基于Axum),通过`/api/v2/*`端点暴露所有内存操作。 -- `cortex-mem-cli`:供开发和管理员直接与内存存储交互的命令行工具。 +- `cortex-mem-core`:系统的核心。包含文件系统抽象(`cortex://` URI)、LLM客户端包装器、嵌入生成、Qdrant集成、会话管理、层生成(L0/L1/L2)、提取引擎、搜索引擎和自动化编排器的业务逻辑,以及 增量更新系统(`MemoryEventCoordinator`、`CascadeLayerUpdater`、`LlmResultCache`、`IncrementalMemoryUpdater`)和 遗忘机制(`MemoryCleanupService`)。 +- `cortex-mem-service`:高性能REST API服务器(基于Axum),通过`/api/v2/*`端点暴露所有内存操作,默认端口 8085。 +- `cortex-mem-cli`:二进制名为 `cortex-mem` 的命令行工具,供开发人员和管理员直接与内存存储交互。 - `cortex-mem-insights`:纯前端Svelte 5 SPA,用于通过Web界面进行监控、分析和内存管理。 - `cortex-mem-mcp`:模型上下文协议服务器,用于与AI助手(Claude Desktop、Cursor等)集成。 - `cortex-mem-rig`:与rig-core代理框架的集成层,用于工具注册。 @@ -456,53 +458,54 @@ data_dir = "./cortex-data" # 用于存储内存文件和会话的目录 ### CLI (`cortex-mem-cli`) -CLI提供了直接与内存系统交互的强大界面。所有命令都需要`config.toml`文件,可以使用`--config `指定。`--tenant`标志允许多租户隔离。 +CLI提供了直接与内存系统交互的强大界面,二进制名称为 `cortex-mem`。所有命令都需要`config.toml`文件,可以使用`--config `(短选项 `-c`)指定。`--tenant`标志允许多租户隔离。 #### 添加内存 向会话线程添加新消息,自动存储在内存系统中。 ```sh -cortex-mem-cli --config 
config.toml --tenant acme add --thread thread-123 --role user --content "用户对Rust编程感兴趣。" +cortex-mem --config config.toml --tenant acme add --thread thread-123 --role user "用户对Rust编程感兴趣。" ``` -- `--thread `:(必需)线程/会话ID。 -- `--role `:消息角色(user/assistant/system)。默认:"user" -- `--content `:消息的文本内容。 +- `--thread ` / `-t`:(必需)线程/会话ID。 +- `--role ` / `-r`:消息角色(user/assistant/system)。默认:"user" +- `content`:消息的文本内容(位置参数)。 #### 搜索内存 在内存存储中执行带有加权L0/L1/L2评分的语义向量搜索。 ```sh -cortex-mem-cli --config config.toml --tenant acme search "用户的爱好是什么?" --thread thread-123 --limit 10 +cortex-mem --config config.toml --tenant acme search "用户的爱好是什么?" --thread thread-123 --limit 10 ``` - `query`:搜索的自然语言查询。 - `--thread `:按线程ID过滤内存。 -- `--limit `:最大结果数。默认:10 -- `--min-score `:最小相关性分数(0.0-1.0)。默认:0.3 +- `--limit ` / `-n`:最大结果数。默认:10 +- `--min-score ` / `-s`:最小相关性分数(0.0-1.0)。默认:0.4 - `--scope `:搜索范围:"session"、"user"或"agent"。默认:"session" #### 列出内存 从特定URI路径检索内存列表。 ```sh -cortex-mem-cli --config config.toml --tenant acme list --uri "cortex://session" --include-abstracts +cortex-mem --config config.toml --tenant acme list --uri "cortex://session" --include-abstracts ``` -- `--uri `:要列出的URI路径(例如,"cortex://session"或"cortex://user/preferences")。 +- `--uri ` / `-u`:要列出的URI路径(例如,"cortex://session"或"cortex://user/preferences")。默认:`cortex://session` - `--include-abstracts`:在结果中包含L0摘要。 #### 获取特定内存 按其URI检索特定内存。 ```sh -cortex-mem-cli --config config.toml --tenant acme get "cortex://session/thread-123/memory-456.md" +cortex-mem --config config.toml --tenant acme get "cortex://session/thread-123/memory-456.md" ``` - `uri`:内存URI。 -- `--abstract-only`:显示L0摘要而不是完整内容。 +- `--abstract-only` / `-a`:显示L0摘要而不是完整内容。 +- `--overview` / `-o`:显示L1概览而不是完整内容。 #### 删除内存 按其URI从存储中删除内存。 ```sh -cortex-mem-cli --config config.toml --tenant acme delete "cortex://session/thread-123/memory-456.md" +cortex-mem --config config.toml --tenant acme delete "cortex://session/thread-123/memory-456.md" ``` #### 会话管理 @@ -510,13 +513,13 @@ 
cortex-mem-cli --config config.toml --tenant acme delete "cortex://session/threa ```sh # 列出所有会话 -cortex-mem-cli --config config.toml --tenant acme session list +cortex-mem --config config.toml --tenant acme session list # 创建新会话 -cortex-mem-cli --config config.toml --tenant acme session create thread-456 --title "我的会话" +cortex-mem --config config.toml --tenant acme session create thread-456 --title "我的会话" -# 关闭会话(触发提取) -cortex-mem-cli --config config.toml --tenant acme session close thread-456 +# 关闭会话(触发记忆提取、层级生成和向量索引) +cortex-mem --config config.toml --tenant acme session close thread-456 ``` #### 层级管理和统计 @@ -524,16 +527,19 @@ cortex-mem-cli --config config.toml --tenant acme session close thread-456 ```sh # 显示系统统计信息 -cortex-mem-cli --config config.toml --tenant acme stats +cortex-mem --config config.toml --tenant acme stats # 列出可用租户 -cortex-mem-cli --config config.toml tenant list +cortex-mem --config config.toml tenant list # 显示L0/L1层级文件覆盖状态 -cortex-mem-cli --config config.toml --tenant acme layers status +cortex-mem --config config.toml --tenant acme layers status # 生成缺失的L0/L1层级文件 -cortex-mem-cli --config config.toml --tenant acme layers ensure-all +cortex-mem --config config.toml --tenant acme layers ensure-all + +# 重新生成超大 L0 摘要文件(> 2K 字符) +cortex-mem --config config.toml --tenant acme layers regenerate-oversized ``` ### REST API (`cortex-mem-service`) @@ -542,11 +548,11 @@ REST API允许您将Cortex Memory集成到任何应用程序中,无论编程 #### 启动服务 ```sh -# 使用默认设置启动API服务器 -cortex-mem-service --data-dir ./cortex-data --host 127.0.0.1 --port 8085 +# 使用默认设置启动API服务器(默认端口 8085) +cortex-mem-service --config config.toml --host 127.0.0.1 --port 8085 # 启用详细日志记录 -cortex-mem-service -d ./cortex-data -h 127.0.0.1 -p 8085 --verbose +cortex-mem-service --config config.toml -h 127.0.0.1 -p 8085 --verbose ``` #### API端点 diff --git a/cortex-mem-cli/README.md b/cortex-mem-cli/README.md index 9409f96..759a8a5 100644 --- a/cortex-mem-cli/README.md +++ b/cortex-mem-cli/README.md @@ -126,17 +126,19 @@ 
cortex-mem list --include-abstracts #### Get Memory ```bash -cortex-mem get [--abstract-only] +cortex-mem get [--abstract-only] [--overview] # Examples cortex-mem get cortex://session/tech-support/timeline/2024/01/15/14_30_00_abc123.md cortex-mem get cortex://session/tech-support/timeline/2024/01/15/14_30_00_abc123.md --abstract-only +cortex-mem get cortex://session/tech-support/timeline/2024/01/15/14_30_00_abc123.md --overview ``` | Argument | Short | Default | Description | |----------|-------|---------|-------------| | `uri` | | (required) | Memory URI to retrieve | -| `--abstract-only` | `-a` | false | Show L0 abstract instead of full content | +| `--abstract-only` | `-a` | false | Show L0 abstract (~100 tokens) instead of full content | +| `--overview` | `-o` | false | Show L1 overview (structured summary) instead of full content | #### Delete Memory @@ -147,6 +149,22 @@ cortex-mem delete cortex-mem delete cortex://session/tech-support/timeline/2024/01/15/14_30_00_abc123.md ``` +#### Session Close + +Close a session and trigger memory extraction, L0/L1 layer generation, and vector indexing. + +```bash +cortex-mem session close + +# Example +cortex-mem session close customer-support +``` + +This is the key command for finalizing a conversation — it runs the full processing pipeline: +1. **Memory Extraction**: LLM analyzes the conversation and extracts structured facts, decisions, and entities. +2. **Layer Generation**: L0 (abstract) and L1 (overview) files are generated or updated. +3. **Vector Indexing**: All layers are embedded and indexed in Qdrant for future semantic search. 
+ ### Layer Commands #### Ensure All Layers @@ -319,6 +337,18 @@ cortex-mem --verbose session create debug-test RUST_BACKTRACE=1 cortex-mem search "test" ``` +## 🧪 Testing + +```bash +# Run basic tests (no external services needed) +cargo test -p cortex-mem-cli + +# Run all tests including integration tests (requires Qdrant + LLM + Embedding) +CONFIG_PATH=./config.toml TENANT_ID=testcase_user cargo test -p cortex-mem-cli -- --include-ignored +``` + +Tests are automatically run in single-threaded mode (configured in `.cargo/config.toml`) to avoid Qdrant collection creation race conditions. + ## 📚 Related Resources - [Cortex Memory Main Project](../README.md) diff --git a/cortex-mem-core/README.md b/cortex-mem-core/README.md index 5b21fb4..9f59245 100644 --- a/cortex-mem-core/README.md +++ b/cortex-mem-core/README.md @@ -11,6 +11,8 @@ Cortex Memory Core implements: - Vector search integration with Qdrant - LLM-based memory extraction and profiling - Event-driven automation system +- Incremental memory update system with event coordination, cascade layer updates, and LLM result caching +- Memory forgetting mechanism based on the Ebbinghaus forgetting curve ## 🏗️ Architecture @@ -29,6 +31,16 @@ Cortex Memory Core implements: | **`embedding`** | Embedding generation | `EmbeddingClient`, `EmbeddingCache` | | **`events`** | Event system for automation | `CortexEvent`, `EventBus` | | **`builder`** | Unified initialization API | `CortexMemBuilder`, `CortexMem` | +| **`memory_index`** | Memory index and version tracking | `MemoryIndex`, `MemoryMetadata`, `MemoryScope`, `MemoryType` | +| **`memory_events`** | Memory change event types | `MemoryEvent`, `ChangeType`, `DeleteReason` | +| **`memory_index_manager`** | Persistent index management | `MemoryIndexManager` | +| **`incremental_memory_updater`** | Incremental diff-based updates | `IncrementalMemoryUpdater` | +| **`cascade_layer_updater`** | Cascading L0/L1 layer updates | `CascadeLayerUpdater`, `UpdateStats` | +| 
**`cascade_layer_debouncer`** | Batch debouncing for layer updates | `LayerUpdateDebouncer`, `DebouncerConfig` |
+| **`llm_result_cache`** | LRU+TTL cache for LLM results | `LlmResultCache`, `CacheConfig`, `CacheStats` |
+| **`vector_sync_manager`** | Vector store sync coordination | `VectorSyncManager`, `VectorSyncStats` |
+| **`memory_event_coordinator`** | Central event orchestration hub | `MemoryEventCoordinator`, `CoordinatorConfig` |
+| **`memory_cleanup`** | Forgetting mechanism | `MemoryCleanupService`, `MemoryCleanupConfig`, `CleanupStats` |
 
 ## 🚀 Quick Start
 
@@ -447,6 +459,34 @@ pub enum FilesystemEvent {
 }
 ```
 
+## ⚡ Incremental Update System
+
+Introduced an event-driven incremental update pipeline that keeps memory layers in sync efficiently:
+
+- **`MemoryEventCoordinator`**: Central hub that receives `MemoryEvent`s (create/update/delete) and orchestrates downstream processing.
+- **`IncrementalMemoryUpdater`**: Computes content diffs to only re-process changed memories, skipping unchanged content.
+- **`CascadeLayerUpdater`**: When a memory changes, cascades L0/L1 layer updates up the directory tree. Uses content hash check (Phase 1) and LLM result cache (Phase 3) to minimize redundant work.
+- **`LayerUpdateDebouncer`**: Batches rapid successive updates to the same directory (Phase 2), reducing LLM calls by 70-90%.
+- **`LlmResultCache`**: LRU + TTL cache for generated L0/L1 content. Reduces LLM API costs by 50-75% for repeated content.
+- **`VectorSyncManager`**: Keeps the Qdrant vector store synchronized with filesystem changes.
+
+## 🧹 Memory Cleanup (Forgetting Mechanism)
+
+The `MemoryCleanupService` implements the Ebbinghaus forgetting curve:
+
+- Periodically scans the memory index and calculates **memory strength** based on recency and access frequency.
+- Memories with strength below `archive_threshold` (default: 0.1) are **archived** (marked but not deleted).
+- Archived memories with strength below `delete_threshold` (default: 0.02) are **permanently deleted**. +- Prevents unbounded storage growth in long-running AI agents. + +```rust +use cortex_mem_core::{MemoryCleanupService, MemoryCleanupConfig, MemoryScope}; + +let svc = MemoryCleanupService::new(index_manager, MemoryCleanupConfig::default()); +let stats = svc.run_cleanup(&MemoryScope::User, "alice").await?; +println!("Archived: {}, Deleted: {}", stats.archived, stats.deleted); +``` + ## 🔗 Integration with Other Crates - **`cortex-mem-config`**: Configuration loading and management diff --git a/cortex-mem-mcp/README.md b/cortex-mem-mcp/README.md index 46b22f7..192ed39 100644 --- a/cortex-mem-mcp/README.md +++ b/cortex-mem-mcp/README.md @@ -267,6 +267,10 @@ cargo build --release --bin cortex-mem-mcp |----------|---------|-------------| | `--config` / `-c` | `config.toml` | Path to configuration file | | `--tenant` | `default` | Tenant ID for memory isolation | +| `--auto-trigger-threshold` | `10` | Message count threshold to auto-trigger memory extraction | +| `--auto-trigger-interval` | `300` | Minimum seconds between auto-trigger executions | +| `--auto-trigger-inactivity` | `120` | Inactivity timeout in seconds to trigger extraction | +| `--no-auto-trigger` | `false` | Disable the auto-trigger feature entirely | ### Configure Claude Desktop @@ -291,7 +295,9 @@ Add configuration: "command": "/path/to/cortex-mem-mcp", "args": [ "--config", "/path/to/config.toml", - "--tenant", "default" + "--tenant", "default", + "--auto-trigger-threshold", "10", + "--auto-trigger-interval", "300" ], "env": { "RUST_LOG": "info" diff --git a/cortex-mem-service/README.md b/cortex-mem-service/README.md index 2c36785..fd4b3ff 100644 --- a/cortex-mem-service/README.md +++ b/cortex-mem-service/README.md @@ -22,7 +22,7 @@ cd cortex-mem cargo build --release -p cortex-mem-service -# 使用默认配置启动 +# 使用默认配置启动(默认端口 8085) ./target/release/cortex-mem-service # 指定端口和数据目录 @@ -38,7 +38,7 @@ docker 
build -t cortex-mem-service -f docker/Dockerfile . # 运行容器 docker run -d \ --name cortex-mem \ - -p 8080:8080 \ + -p 8085:8085 \ -v $(pwd)/cortex-data:/app/cortex-data \ cortex-mem-service ``` @@ -268,12 +268,10 @@ Content-Type: application/json | 参数 | 默认值 | 说明 | |------|--------|------| -| `--port` | `8080` | 监听端口 | +| `--port` / `-p` | `8085` | 监听端口 | | `--host` | `127.0.0.1` | 绑定地址 | -| `--data-dir` | `./cortex-data` | 数据目录 | -| `--workers` | `CPU 核心数` | 工作线程数 | -| `--log-level` | `info` | 日志级别 | -| `--cors` | `*` | CORS 允许的源 | +| `--data-dir` / `-d` | `./cortex-data` | 数据目录 | +| `--verbose` / `-v` | `false` | 启用详细日志 | ### 环境变量 @@ -287,7 +285,7 @@ export LLM_API_BASE_URL="https://api.openai.com/v1" export LLM_API_KEY="your-api-key" # 服务配置 -export CORTEX_SERVICE_PORT=8080 +export CORTEX_SERVICE_PORT=8085 export CORTEX_SERVICE_HOST="0.0.0.0" export RUST_LOG="cortex_service=debug" ``` diff --git a/docs_plan_v2.6/1_embedding_api_latency_optimization.md b/docs_plan_v2.6/1_embedding_api_latency_optimization.md deleted file mode 100644 index 6cf97f5..0000000 --- a/docs_plan_v2.6/1_embedding_api_latency_optimization.md +++ /dev/null @@ -1,772 +0,0 @@ -# 嵌入 API 延迟优化技术方案 - -## 1. 问题分析 - -### 1.1 当前实现缺陷 - -**位置**: `cortex-mem-core/src/embedding/client.rs:103-105` - -```rust -// 当前实现:强制等待1秒以避免限流 -tokio::time::sleep(std::time::Duration::from_secs(1)).await; -``` - -这个固定延迟存在以下问题: - -1. **过度等待**:API 可能允许更高的请求频率,固定 1 秒延迟过于保守 -2. **无法应对突发流量**:批量索引 100 个文件至少需要 100 秒,用户体验极差 -3. **浪费 API 配额**:在低频使用时,等待时间完全可以被利用 -4. **不可配置**:硬编码延迟时间,无法根据不同 API 提供商调整 - -### 1.2 现有缓存层的问题 - -**位置**: `cortex-mem-core/src/embedding/cache.rs` - -虽然已有 EmbeddingCache 实现,但存在以下问题: - -1. **缓存层未集成**:缓存代码存在但未被实际使用 -2. **缓存键过于简单**:仅使用文本哈希,未考虑模型版本变化 -3. **缺少持久化**:重启后缓存丢失,需要重新请求 API -4. 
**缺少统计信息**:无法监控命中率和效果 - -### 1.3 批处理效率问题 - -当前 `embed_batch_chunked` 方法是顺序执行的: - -```rust -pub async fn embed_batch_chunked(&self, texts: &[String]) -> Result>> { - for chunk in texts.chunks(self.config.batch_size) { - let embeddings = self.embed_batch(chunk).await?; // 顺序执行 - all_embeddings.extend(embeddings); - } -} -``` - -## 2. 解决方案 - -### 2.1 架构设计 - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ 优化后的嵌入生成架构 │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ 用户请求 │───►│ 持久化缓存 │───►│ 自适应限流器 │───►│ 批量并发 │ │ -│ │ │ │ (磁盘+内存) │ │ │ │ 调度器 │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ │ -│ ▼ ▼ ▼ │ -│ ┌──────────────────────────────────────────────────┐ │ -│ │ 嵌入 API │ │ -│ └──────────────────────────────────────────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -### 2.2 自适应速率限制器 - -#### 2.2.1 令牌桶算法实现 - -```rust -// 文件: cortex-mem-core/src/embedding/rate_limiter.rs (新建) - -use std::sync::Arc; -use tokio::sync::Mutex; -use std::time::{Duration, Instant}; - -/// 速率限制配置 -#[derive(Debug, Clone)] -pub struct RateLimitConfig { - /// 令牌桶容量(最大突发请求数) - pub max_burst: u32, - /// 令牌补充速率(每秒补充的令牌数) - pub tokens_per_second: f32, - /// 最小等待时间(毫秒),避免过于频繁的请求 - pub min_wait_ms: u64, - /// 最大等待时间(毫秒),超过此时间报错 - pub max_wait_ms: u64, -} - -impl Default for RateLimitConfig { - fn default() -> Self { - Self { - max_burst: 10, // 允许突发 10 个请求 - tokens_per_second: 2.0, // 每秒 2 个请求(根据 API 调整) - min_wait_ms: 50, // 最小间隔 50ms - max_wait_ms: 30000, // 最多等待 30 秒 - } - } -} - -/// OpenAI API 配置 -impl RateLimitConfig { - pub fn openai() -> Self { - Self { - max_burst: 20, - tokens_per_second: 5.0, // OpenAI 允许较高频率 - min_wait_ms: 20, - max_wait_ms: 60000, - } - } - - /// Azure OpenAI 配置 - pub fn azure_openai() -> Self { - Self { - max_burst: 10, - 
tokens_per_second: 3.0, - min_wait_ms: 50, - max_wait_ms: 60000, - } - } - - /// 本地模型配置(无限制) - pub fn local() -> Self { - Self { - max_burst: 1000, - tokens_per_second: 1000.0, - min_wait_ms: 0, - max_wait_ms: 0, - } - } -} - -/// 自适应速率限制器 -pub struct AdaptiveRateLimiter { - config: RateLimitConfig, - state: Arc>, -} - -struct RateLimiterState { - tokens: f32, - last_refill: Instant, - /// 自适应调整:记录最近的错误 - recent_errors: u32, - /// 自适应调整:当前退避倍数 - backoff_multiplier: f32, -} - -impl AdaptiveRateLimiter { - pub fn new(config: RateLimitConfig) -> Self { - Self { - config, - state: Arc::new(Mutex::new(RateLimiterState { - tokens: config.max_burst as f32, - last_refill: Instant::now(), - recent_errors: 0, - backoff_multiplier: 1.0, - })), - } - } - - /// 获取一个令牌(阻塞直到可用) - pub async fn acquire(&self) -> Result<(), crate::Error> { - let wait_duration = self.try_acquire().await?; - - if let Some(duration) = wait_duration { - if duration.as_millis() as u64 > self.config.max_wait_ms { - return Err(crate::Error::Embedding( - "Rate limit exceeded: max wait time reached".to_string() - )); - } - tokio::time::sleep(duration).await; - } - - // 确保最小间隔 - if self.config.min_wait_ms > 0 { - tokio::time::sleep(Duration::from_millis(self.config.min_wait_ms)).await; - } - - Ok(()) - } - - async fn try_acquire(&self) -> Result, crate::Error> { - let mut state = self.state.lock().await; - - // 补充令牌 - let now = Instant::now(); - let elapsed = now.duration_since(state.last_refill).as_secs_f32(); - let tokens_to_add = elapsed * self.config.tokens_per_second / state.backoff_multiplier; - state.tokens = (state.tokens + tokens_to_add).min(self.config.max_burst as f32); - state.last_refill = now; - - if state.tokens >= 1.0 { - state.tokens -= 1.0; - Ok(None) - } else { - // 计算需要等待的时间 - let tokens_needed = 1.0 - state.tokens; - let wait_secs = tokens_needed * state.backoff_multiplier / self.config.tokens_per_second; - Ok(Some(Duration::from_secs_f32(wait_secs))) - } - } - - /// 报告错误(用于自适应调整) - pub 
async fn report_error(&self, is_rate_limit_error: bool) { - let mut state = self.state.lock().await; - - if is_rate_limit_error { - state.recent_errors += 1; - // 指数退避 - state.backoff_multiplier = (state.backoff_multiplier * 1.5).min(10.0); - tracing::warn!( - "Rate limit error detected, increasing backoff to {:.2}x", - state.backoff_multiplier - ); - } - } - - /// 报告成功(恢复正常速率) - pub async fn report_success(&self) { - let mut state = self.state.lock().await; - state.recent_errors = 0; - // 逐渐恢复 - state.backoff_multiplier = (state.backoff_multiplier * 0.9).max(1.0); - } - - /// 获取当前状态 - pub async fn status(&self) -> RateLimiterStatus { - let state = self.state.lock().await; - RateLimiterStatus { - available_tokens: state.tokens, - backoff_multiplier: state.backoff_multiplier, - recent_errors: state.recent_errors, - } - } -} - -#[derive(Debug, Clone)] -pub struct RateLimiterStatus { - pub available_tokens: f32, - pub backoff_multiplier: f32, - pub recent_errors: u32, -} -``` - -### 2.3 持久化缓存层 - -#### 2.3.1 缓存存储设计 - -```rust -// 文件: cortex-mem-core/src/embedding/persistent_cache.rs (新建) - -use serde::{Deserialize, Serialize}; -use std::path::PathBuf; -use std::collections::HashMap; -use tokio::sync::RwLock; -use std::time::{Duration, Instant}; - -/// 缓存条目 -#[derive(Debug, Clone, Serialize, Deserialize)] -struct CacheEntry { - /// 嵌入向量 - embedding: Vec, - /// 模型名称(用于模型升级时失效) - model_name: String, - /// 模型版本(如果 API 提供) - model_version: Option, - /// 创建时间戳 - created_at: i64, - /// 最后访问时间戳 - last_accessed: i64, - /// 访问次数 - access_count: u32, - /// 文本哈希 - text_hash: String, -} - -/// 持久化缓存配置 -#[derive(Debug, Clone)] -pub struct PersistentCacheConfig { - /// 缓存目录 - pub cache_dir: PathBuf, - /// 内存中保留的热点条目数 - pub hot_cache_size: usize, - /// TTL(秒) - pub ttl_secs: u64, - /// 是否启用磁盘持久化 - pub persist_to_disk: bool, -} - -impl Default for PersistentCacheConfig { - fn default() -> Self { - Self { - cache_dir: PathBuf::from("./.embedding_cache"), - hot_cache_size: 10000, - 
ttl_secs: 86400 * 7, // 7 天 - persist_to_disk: true, - } - } -} - -/// 两级缓存:内存(热点)+ 磁盘(持久化) -pub struct PersistentEmbeddingCache { - config: PersistentCacheConfig, - model_name: String, - /// 内存中的热点缓存(LRU) - hot_cache: Arc>>, - /// 访问顺序(LRU 实现) - access_order: Arc>>, - /// 统计信息 - stats: Arc>, -} - -#[derive(Debug, Clone, Default)] -pub struct CacheStats { - pub total_requests: u64, - pub hot_hits: u64, - pub disk_hits: u64, - pub misses: u64, - pub evictions: u64, -} - -impl PersistentEmbeddingCache { - pub async fn new(config: PersistentCacheConfig, model_name: String) -> Result { - let cache = Self { - config, - model_name, - hot_cache: Arc::new(RwLock::new(HashMap::new())), - access_order: Arc::new(RwLock::new(Vec::new())), - stats: Arc::new(RwLock::new(CacheStats::default())), - }; - - // 从磁盘加载缓存 - if cache.config.persist_to_disk { - cache.load_from_disk().await?; - } - - Ok(cache) - } - - /// 生成缓存键(包含模型信息) - fn make_key(&self, text: &str) -> String { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - - let mut hasher = DefaultHasher::new(); - text.hash(&mut hasher); - self.model_name.hash(&mut hasher); - - format!("{}:{:016x}", self.model_name, hasher.finish()) - } - - /// 获取嵌入(优先从缓存) - pub async fn get(&self, text: &str) -> Option> { - let key = self.make_key(text); - - // 统计请求 - { - let mut stats = self.stats.write().await; - stats.total_requests += 1; - } - - // 1. 先查内存缓存 - { - let cache = self.hot_cache.read().await; - if let Some(entry) = cache.get(&key) { - if self.is_valid(entry) { - // 更新访问信息 - self.record_access(&key).await; - - let mut stats = self.stats.write().await; - stats.hot_hits += 1; - - return Some(entry.embedding.clone()); - } - } - } - - // 2. 
查磁盘缓存 - if self.config.persist_to_disk { - if let Ok(Some(entry)) = self.load_from_disk_entry(&key).await { - if self.is_valid(&entry) { - // 提升到内存缓存 - self.put_hot_cache(key.clone(), entry.clone()).await; - - let mut stats = self.stats.write().await; - stats.disk_hits += 1; - - return Some(entry.embedding); - } - } - } - - // 3. 缓存未命中 - { - let mut stats = self.stats.write().await; - stats.misses += 1; - } - - None - } - - /// 存入缓存 - pub async fn put(&self, text: &str, embedding: Vec) { - let key = self.make_key(text); - let text_hash = self.hash_text(text); - - let entry = CacheEntry { - embedding, - model_name: self.model_name.clone(), - model_version: None, - created_at: chrono::Utc::now().timestamp(), - last_accessed: chrono::Utc::now().timestamp(), - access_count: 1, - text_hash, - }; - - // 存入内存缓存 - self.put_hot_cache(key.clone(), entry.clone()).await; - - // 持久化到磁盘 - if self.config.persist_to_disk { - let _ = self.save_to_disk_entry(&key, &entry).await; - } - } - - /// 批量获取 - pub async fn get_batch(&self, texts: &[String]) -> Vec>> { - let mut results = Vec::with_capacity(texts.len()); - for text in texts { - results.push(self.get(text).await); - } - results - } - - /// 批量存储 - pub async fn put_batch(&self, texts: &[String], embeddings: Vec>) { - for (text, embedding) in texts.iter().zip(embeddings.into_iter()) { - self.put(text, embedding).await; - } - } - - /// 获取命中率 - pub async fn hit_rate(&self) -> f32 { - let stats = self.stats.read().await; - if stats.total_requests == 0 { - return 0.0; - } - (stats.hot_hits + stats.disk_hits) as f32 / stats.total_requests as f32 - } - - /// 清理过期缓存 - pub async fn cleanup_expired(&self) -> usize { - let mut cleaned = 0; - - // 清理内存缓存 - { - let mut cache = self.hot_cache.write().await; - let now = chrono::Utc::now().timestamp(); - let ttl = self.config.ttl_secs as i64; - - cache.retain(|_, entry| { - let valid = now - entry.created_at < ttl; - if !valid { - cleaned += 1; - } - valid - }); - } - - // 清理磁盘缓存 - if 
self.config.persist_to_disk { - cleaned += self.cleanup_disk_cache().await.unwrap_or(0); - } - - cleaned - } - - // ... 其他私有方法实现 - async fn is_valid(&self, entry: &CacheEntry) -> bool { - let now = chrono::Utc::now().timestamp(); - now - entry.created_at < self.config.ttl_secs as i64 - && entry.model_name == self.model_name - } - - async fn put_hot_cache(&self, key: String, entry: CacheEntry) { - let mut cache = self.hot_cache.write().await; - let mut order = self.access_order.write().await; - - // LRU 淘汰 - while cache.len() >= self.config.hot_cache_size { - if let Some(oldest) = order.first().cloned() { - cache.remove(&oldest); - order.remove(0); - - let mut stats = self.stats.write().await; - stats.evictions += 1; - } - } - - cache.insert(key.clone(), entry); - order.push(key); - } - - async fn record_access(&self, key: &str) { - let mut order = self.access_order.write().await; - if let Some(pos) = order.iter().position(|k| k == key) { - order.remove(pos); - order.push(key.to_string()); - } - } - - fn hash_text(&self, text: &str) -> String { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - let mut hasher = DefaultHasher::new(); - text.hash(&mut hasher); - format!("{:016x}", hasher.finish()) - } - - async fn load_from_disk(&self) -> Result<(), crate::Error> { - // 实现:从磁盘加载缓存索引 - Ok(()) - } - - async fn load_from_disk_entry(&self, key: &str) -> Result, crate::Error> { - // 实现:从磁盘加载单个条目 - Ok(None) - } - - async fn save_to_disk_entry(&self, key: &str, entry: &CacheEntry) -> Result<(), crate::Error> { - // 实现:保存单个条目到磁盘 - Ok(()) - } - - async fn cleanup_disk_cache(&self) -> Result { - // 实现:清理磁盘上的过期缓存 - Ok(0) - } -} -``` - -### 2.4 集成改进后的 EmbeddingClient - -```rust -// 文件: cortex-mem-core/src/embedding/client.rs (修改) - -use crate::embedding::{AdaptiveRateLimiter, PersistentEmbeddingCache, RateLimitConfig}; -use std::sync::Arc; - -/// 增强的嵌入客户端 -pub struct EmbeddingClientV2 { - config: EmbeddingConfig, - http_client: reqwest::Client, - 
rate_limiter: AdaptiveRateLimiter, - cache: Option>, -} - -impl EmbeddingClientV2 { - pub fn new(config: EmbeddingConfig) -> Result { - let http_client = reqwest::Client::builder() - .timeout(std::time::Duration::from_secs(config.timeout_secs)) - .build() - .map_err(|e| crate::Error::Embedding(format!("Failed to create HTTP client: {}", e)))?; - - // 根据配置选择速率限制策略 - let rate_limit_config = match config.rate_limit_preset.as_str() { - "openai" => RateLimitConfig::openai(), - "azure" => RateLimitConfig::azure_openai(), - "local" => RateLimitConfig::local(), - _ => RateLimitConfig::default(), - }; - - Ok(Self { - config, - http_client, - rate_limiter: AdaptiveRateLimiter::new(rate_limit_config), - cache: None, // 延迟初始化 - }) - } - - /// 启用持久化缓存 - pub async fn with_cache(mut self, cache_dir: impl Into) -> Result { - let cache_config = PersistentCacheConfig { - cache_dir: cache_dir.into(), - ..Default::default() - }; - - self.cache = Some(Arc::new( - PersistentEmbeddingCache::new(cache_config, self.config.model_name.clone()).await? - )); - - Ok(self) - } - - /// 嵌入单个文本 - pub async fn embed(&self, text: &str) -> Result> { - // 1. 尝试从缓存获取 - if let Some(cache) = &self.cache { - if let Some(embedding) = cache.get(text).await { - tracing::debug!("Embedding cache hit"); - return Ok(embedding); - } - } - - // 2. 获取速率限制许可 - self.rate_limiter.acquire().await?; - - // 3. 调用 API - let embedding = self.call_api(&[text.to_string()]).await? - .into_iter() - .next() - .ok_or_else(|| crate::Error::Embedding("No embedding returned".to_string()))?; - - // 4. 存入缓存 - if let Some(cache) = &self.cache { - cache.put(text, embedding.clone()).await; - } - - // 5. 报告成功(用于自适应调整) - self.rate_limiter.report_success().await; - - Ok(embedding) - } - - /// 批量嵌入(智能调度) - pub async fn embed_batch_smart(&self, texts: &[String]) -> Result>> { - if texts.is_empty() { - return Ok(vec![]); - } - - // 1. 
批量检查缓存 - let mut results: Vec>> = vec![None; texts.len()]; - let mut cache_miss_indices = Vec::new(); - let mut cache_miss_texts = Vec::new(); - - if let Some(cache) = &self.cache { - let cached = cache.get_batch(texts).await; - for (i, result) in cached.into_iter().enumerate() { - if let Some(embedding) = result { - results[i] = Some(embedding); - } else { - cache_miss_indices.push(i); - cache_miss_texts.push(texts[i].clone()); - } - } - } else { - cache_miss_indices = (0..texts.len()).collect(); - cache_miss_texts = texts.to_vec(); - } - - // 2. 批量处理未命中的请求 - if !cache_miss_texts.is_empty() { - let mut all_embeddings = Vec::new(); - - // 并发批处理(根据速率限制调整并发度) - let concurrent_batches = self.calculate_concurrent_batches().await; - for chunk in cache_miss_texts.chunks(self.config.batch_size * concurrent_batches) { - // 获取批量许可 - for _ in 0..(chunk.len().div_ceil(self.config.batch_size)) { - self.rate_limiter.acquire().await?; - } - - // 并发调用 API - let embeddings = self.call_api(chunk).await?; - all_embeddings.extend(embeddings); - } - - // 3. 存入缓存并填充结果 - if let Some(cache) = &self.cache { - cache.put_batch(&cache_miss_texts, all_embeddings.clone()).await; - } - - for (idx, embedding) in cache_miss_indices.into_iter().zip(all_embeddings.into_iter()) { - results[idx] = Some(embedding); - } - } - - Ok(results.into_iter().map(|opt| opt.unwrap()).collect()) - } - - /// 计算可以并发执行的批次数 - async fn calculate_concurrent_batches(&self) -> usize { - let status = self.rate_limiter.status().await; - // 根据可用令牌和退避倍数计算并发度 - let base_concurrent = (status.available_tokens as usize).max(1); - let adjusted = (base_concurrent as f32 / status.backoff_multiplier) as usize; - adjusted.max(1).min(4) // 限制最大并发为 4 - } - - async fn call_api(&self, texts: &[String]) -> Result>> { - // ... 原有的 API 调用逻辑,移除固定延迟 - // ... 
错误处理时调用 self.rate_limiter.report_error(true).await - } -} -``` - -### 2.5 配置扩展 - -```toml -# config.toml 新增配置项 - -[embedding] -api_base_url = "https://api.openai.com/v1" -api_key = "${OPENAI_API_KEY}" -model_name = "text-embedding-3-small" -batch_size = 32 -timeout_secs = 30 - -# 新增:速率限制预设 -rate_limit_preset = "openai" # openai, azure, local, custom - -# 新增:缓存配置 -[embedding.cache] -enabled = true -cache_dir = "./.embedding_cache" -hot_cache_size = 10000 -ttl_days = 7 -persist_to_disk = true - -# 新增:自定义速率限制(当 preset = "custom" 时使用) -[embedding.rate_limit] -max_burst = 10 -tokens_per_second = 2.0 -min_wait_ms = 50 -max_wait_ms = 30000 -``` - -## 3. 实现计划 - -### 3.1 文件结构 - -``` -cortex-mem-core/src/embedding/ -├── mod.rs # 模块导出 -├── client.rs # 主客户端(修改) -├── cache.rs # 内存缓存(保留,简化) -├── persistent_cache.rs # 持久化缓存(新建) -└── rate_limiter.rs # 自适应速率限制(新建) -``` - -### 3.2 实现步骤 - -| 步骤 | 任务 | 预期效果 | -|------|------|----------| -| 1 | 实现自适应速率限制器 | 移除固定延迟,提升 30-50% 吞吐量 | -| 2 | 实现持久化缓存 | 减少 60-80% 重复 API 调用 | -| 3 | 重构 EmbeddingClient | 集成缓存和速率限制 | -| 4 | 添加配置支持 | 允许用户自定义策略 | -| 5 | 添加监控指标 | 支持性能调优 | - -### 3.3 测试要点 - -1. **单元测试**:速率限制器的令牌补充和消耗逻辑 -2. **集成测试**:缓存命中率、API 调用减少率 -3. **压力测试**:批量索引 1000 个文件的耗时对比 -4. **恢复测试**:模拟 429 错误时的自动退避 - -## 4. 预期收益 - -| 指标 | 当前 | 优化后 | 提升 | -|------|------|--------|------| -| 批量索引 100 文件 | ~100s | ~20-30s | 70%+ | -| 重复查询延迟 | 1-2s | <10ms | 99%+ | -| API 调用次数(有缓存) | 100% | 20-40% | 60-80% | -| 429 错误恢复时间 | 手动 | 自动 | N/A | - -## 5. 兼容性说明 - -1. **向后兼容**:新配置项有默认值,旧配置文件无需修改 -2. **渐进式启用**:缓存默认启用,可通过配置关闭 -3. **平滑迁移**:保留原有 `embed_batch` 方法签名,新增 `embed_batch_smart` diff --git a/docs_plan_v2.6/2_entity_query_recall_optimization.md b/docs_plan_v2.6/2_entity_query_recall_optimization.md deleted file mode 100644 index f1327fe..0000000 --- a/docs_plan_v2.6/2_entity_query_recall_optimization.md +++ /dev/null @@ -1,1022 +0,0 @@ -# 实体查询召回优化技术方案 - -## 1. 
问题分析 - -### 1.1 L0 抽象层信息丢失问题 - -**位置**: `cortex-mem-core/src/layers/generator.rs` - -L0 抽象层设计为 ~100 tokens 的精简摘要,用于快速筛选候选。但在压缩过程中丢失了关键实体信息: - -```rust -// 当前 L0 prompt 要求 -"Cover MULTIPLE key aspects when content is rich (who, what, key topics, important outcomes)" -"Prioritize information breadth over depth" -``` - -**问题表现**: -- 用户查询"王明是谁"时,L0 摘要可能只写了"讨论了项目进展",丢失了具体人名 -- 导致 L0 层召回失败,需要降级到更低的阈值或全量 L2 检索 -- 影响检索效率和精度 - -### 1.2 Intent 分析实现过于简陋 - -**位置**: `cortex-mem-core/src/search/vector_engine.rs:600-644` - -当前 `detect_intent_type` 方法使用硬编码的关键词匹配: - -```rust -fn detect_intent_type(query: &str) -> QueryIntentType { - let lower = query.to_lowercase(); - - // 硬编码的关键词匹配 - if lower.contains("when") || lower.contains("recent") || ... { - return QueryIntentType::Temporal; - } - if lower.starts_with("what is") || lower.starts_with("who is") || ... { - return QueryIntentType::Factual; - } - // ... - QueryIntentType::General -} -``` - -**问题分析**: -1. **中文支持差**:只匹配英文关键词,中文查询几乎全部归类为 General -2. **语义理解缺失**:"告诉我上次我们讨论的技术方案" 无法被识别为 Temporal -3. **实体查询判断粗糙**:`is_likely_entity_query` 仅通过长度和字符类型判断 - -### 1.3 已有资源未被利用 - -**位置**: `cortex-mem-core/src/llm/prompts.rs:106-126` - -存在一个完整的 `intent_analysis` prompt,但从未被调用: - -```rust -/// Prompt for intent analysis in retrieval -pub fn intent_analysis(query: &str) -> String { - format!( - r#"Analyze the following query and extract: -1. **Keywords**: Important keywords for search (2-5 words) -2. **Entities**: Named entities mentioned (people, places, technologies) -3. **Time Range**: Any time-related constraints (if mentioned) -4. **Query Type**: The type of query (factual, procedural, conceptual, etc.) -... -``` - -**技术债务**:已有完善的 LLM intent 分析 prompt,却用简陋的关键词匹配替代。 - -### 1.4 实体识别与召回链路断裂 - -当前流程中,实体信息仅在记忆提取时识别,但在召回时未利用: - -``` -记忆存储时:识别实体 → 存入 metadata.entities -记忆召回时:查询 → 嵌入 → 向量相似度 → 返回结果 - ↑ - 未利用实体信息 -``` - -## 2. 
解决方案 - -### 2.1 架构设计 - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ 实体感知召回架构 │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ 用户查询 │───►│ LLM Intent │───►│ 实体增强 │───►│ 混合检索 │ │ -│ │ │ │ 分析器 │ │ 查询扩展 │ │ 策略选择 │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ │ -│ ▼ ▼ ▼ │ -│ ┌──────────────────────────────────────────────────┐ │ -│ │ 检索执行层 │ │ -│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │ -│ │ │实体过滤 │ │三层级联 │ │语义搜索 │ │ │ -│ │ │召回 │ │召回 │ │召回 │ │ │ -│ │ └─────────┘ └─────────┘ └─────────┘ │ │ -│ └──────────────────────────────────────────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -### 2.2 LLM Intent 分析器重构 - -#### 2.2.1 增强 Intent 分析结果结构 - -```rust -// 文件: cortex-mem-core/src/search/query_intent.rs (新建或重构) - -use serde::{Deserialize, Serialize}; - -/// 增强的查询意图分析结果 -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EnhancedQueryIntent { - /// 原始查询 - pub original_query: String, - - /// 重写后的查询(用于语义搜索) - pub rewritten_query: Option, - - /// 查询类型 - pub query_type: QueryType, - - /// 提取的关键词 - pub keywords: Vec, - - /// 提取的命名实体 - pub entities: ExtractedEntities, - - /// 时间约束 - pub time_constraint: Option, - - /// 检索策略建议 - pub retrieval_strategy: RetrievalStrategy, - - /// 置信度 - pub confidence: f32, -} - -/// 查询类型(扩展版) -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum QueryType { - /// 事实查询:"Python 的创始人是谁?" - Factual, - /// 实体查询:"王明"、"那个项目" - EntityLookup, - /// 时间查询:"上周讨论了什么?" - Temporal, - /// 关系查询:"张三和李四是什么关系?" - Relational, - /// 过程查询:"如何配置环境?" - Procedural, - /// 概念查询:"什么是 RAG?" - Conceptual, - /// 搜索查询:"找一下关于微服务的讨论" - Search, - /// 比较/决策查询:"方案A和方案B哪个更好?" 
- Comparative, - /// 一般查询 - General, -} - -/// 提取的实体信息 -#[derive(Debug, Clone, Serialize, Deserialize, Default)] -pub struct ExtractedEntities { - /// 人名 - pub people: Vec, - /// 组织/公司 - pub organizations: Vec, - /// 项目/产品名 - pub projects: Vec, - /// 技术术语 - pub technologies: Vec, - /// 地点 - pub locations: Vec, - /// 其他实体 - pub other: Vec, -} - -impl ExtractedEntities { - pub fn is_empty(&self) -> bool { - self.people.is_empty() - && self.organizations.is_empty() - && self.projects.is_empty() - && self.technologies.is_empty() - && self.locations.is_empty() - && self.other.is_empty() - } - - pub fn all_entities(&self) -> Vec<&String> { - let mut entities = Vec::new(); - entities.extend(&self.people); - entities.extend(&self.organizations); - entities.extend(&self.projects); - entities.extend(&self.technologies); - entities.extend(&self.locations); - entities.extend(&self.other); - entities - } -} - -/// 时间约束 -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct TimeConstraint { - /// 时间表达式原文 - pub expression: String, - /// 解析后的起始时间 - pub start: Option>, - /// 解析后的结束时间 - pub end: Option>, - /// 相对时间描述 - pub relative: Option, // "last_week", "yesterday", "recent" -} - -/// 检索策略建议 -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub enum RetrievalStrategy { - /// 三层级联检索(默认) - LayeredCascade, - /// 实体优先检索 - EntityFirst, - /// 时间范围过滤优先 - TimeFiltered, - /// 全量语义检索 - FullSemantic, - /// 混合策略 - Hybrid { - entity_weight: f32, - semantic_weight: f32, - time_weight: f32, - }, -} -``` - -#### 2.2.2 LLM Intent 分析器实现 - -```rust -// 文件: cortex-mem-core/src/search/intent_analyzer.rs (新建) - -use crate::llm::{LLMClient, Prompts}; -use crate::search::query_intent::*; -use crate::Result; -use std::sync::Arc; -use serde::Deserialize; - -/// Intent 分析器 -pub struct IntentAnalyzer { - llm_client: Arc, - /// 是否启用 LLM 分析(可配置关闭以节省成本) - enabled: bool, - /// 缓存最近的 intent 分析结果 - cache: Arc>>, -} - -impl IntentAnalyzer { - pub fn new(llm_client: Arc) -> Self { - Self { - 
llm_client, - enabled: true, - cache: Arc::new(tokio::sync::RwLock::new( - lru::LruCache::new(std::num::NonZeroUsize::new(100).unwrap()) - )), - } - } - - /// 分析查询意图 - pub async fn analyze(&self, query: &str) -> Result { - // 1. 检查缓存 - { - let cache = self.cache.read().await; - if let Some(intent) = cache.peek(query) { - return Ok(intent.clone()); - } - } - - // 2. 使用 LLM 进行深度分析 - let intent = if self.enabled { - self.analyze_with_llm(query).await? - } else { - // 回退到规则引擎 - self.analyze_with_rules(query) - }; - - // 3. 缓存结果 - { - let mut cache = self.cache.write().await; - cache.put(query.to_string(), intent.clone()); - } - - Ok(intent) - } - - async fn analyze_with_llm(&self, query: &str) -> Result { - let prompt = Self::build_intent_prompt(query); - - let response = self.llm_client.complete_with_system( - "你是一个专业的查询意图分析器,帮助理解用户在记忆系统中的检索意图。", - &prompt - ).await?; - - // 解析 LLM 响应 - self.parse_llm_response(query, &response) - } - - fn build_intent_prompt(query: &str) -> String { - format!( - r#"分析以下查询,提取关键信息用于记忆检索系统。 - -## 输入查询 -{} - -## 分析任务 -请识别以下信息: - -1. **查询类型**:从以下类型中选择最匹配的 - - factual: 事实性问题("谁"、"什么"、"多少") - - entity_lookup: 查找特定实体(人名、项目名等) - - temporal: 有时间约束的查询 - - relational: 查询实体间关系 - - procedural: 操作步骤类问题 - - conceptual: 概念解释类 - - search: 搜索类请求 - - comparative: 比较/决策类 - - general: 通用查询 - -2. **命名实体**:提取人名、组织、项目、技术术语、地点等 - -3. **时间约束**:识别时间表达式并解析 - -4. **关键词**:提取 2-5 个最重要的搜索关键词 - -5. **查询重写**:如果原查询模糊,生成一个更明确的版本 - -6. 
**检索策略建议**:推荐最合适的检索方法 - -## 输出格式(JSON) -{{ - "query_type": "类型名称", - "entities": {{ - "people": ["人名1"], - "organizations": ["组织名"], - "projects": ["项目名"], - "technologies": ["技术术语"], - "locations": ["地点"], - "other": ["其他实体"] - }}, - "time_constraint": {{ - "expression": "时间原文", - "relative": "相对时间(如 last_week)" - }}, - "keywords": ["关键词1", "关键词2"], - "rewritten_query": "重写后的查询(如果原查询清晰则为 null)", - "retrieval_strategy": {{ - "type": "策略名称", - "params": {{}} - }}, - "confidence": 0.95 -}} - -## 注意事项 -- 中文查询请用中文分析 -- 实体识别要准确,不确定的放入 other -- 时间表达式要尽量解析为具体范围 -- confidence 反映你对分析的确定程度 - -请直接输出 JSON,不要有其他内容。"#, - query - ) - } - - fn parse_llm_response(&self, query: &str, response: &str) -> Result { - // 提取 JSON - let json_str = Self::extract_json(response); - - // 解析 - let parsed: LLMIntentResponse = serde_json::from_str(json_str) - .map_err(|e| crate::Error::Search(format!("Failed to parse intent: {}", e)))?; - - Ok(EnhancedQueryIntent { - original_query: query.to_string(), - rewritten_query: parsed.rewritten_query, - query_type: parsed.query_type, - keywords: parsed.keywords, - entities: parsed.entities, - time_constraint: parsed.time_constraint, - retrieval_strategy: self.parse_strategy(&parsed.retrieval_strategy), - confidence: parsed.confidence, - }) - } - - fn extract_json(response: &str) -> &str { - let trimmed = response.trim(); - if let Some(start) = trimmed.find('{') { - let mut depth = 0; - for (i, c) in trimmed[start..].char_indices() { - match c { - '{' => depth += 1, - '}' => { - depth -= 1; - if depth == 0 { - return &trimmed[start..start + i + 1]; - } - } - _ => {} - } - } - } - trimmed - } - - fn parse_strategy(&self, strategy: &StrategyResponse) -> RetrievalStrategy { - match strategy.type_.as_str() { - "entity_first" => RetrievalStrategy::EntityFirst, - "time_filtered" => RetrievalStrategy::TimeFiltered, - "full_semantic" => RetrievalStrategy::FullSemantic, - "hybrid" => { - let params = &strategy.params; - RetrievalStrategy::Hybrid { - entity_weight: 
params.get("entity_weight").and_then(|v| v.as_f64()).unwrap_or(0.4) as f32, - semantic_weight: params.get("semantic_weight").and_then(|v| v.as_f64()).unwrap_or(0.4) as f32, - time_weight: params.get("time_weight").and_then(|v| v.as_f64()).unwrap_or(0.2) as f32, - } - } - _ => RetrievalStrategy::LayeredCascade, - } - } - - /// 规则引擎回退(当 LLM 不可用时) - fn analyze_with_rules(&self, query: &str) -> EnhancedQueryIntent { - let mut intent = EnhancedQueryIntent { - original_query: query.to_string(), - rewritten_query: None, - query_type: QueryType::General, - keywords: Vec::new(), - entities: ExtractedEntities::default(), - time_constraint: None, - retrieval_strategy: RetrievalStrategy::LayeredCascade, - confidence: 0.5, - }; - - // 简单的规则匹配 - let lower = query.to_lowercase(); - - // 时间关键词检测 - let time_keywords = ["上次", "昨天", "上周", "最近", "last", "yesterday", "week", "recent"]; - if time_keywords.iter().any(|k| lower.contains(k)) { - intent.query_type = QueryType::Temporal; - intent.retrieval_strategy = RetrievalStrategy::TimeFiltered; - } - - // 实体查询检测 - if query.chars().count() <= 6 && !query.contains(' ') && !query.contains('?') { - intent.query_type = QueryType::EntityLookup; - intent.retrieval_strategy = RetrievalStrategy::EntityFirst; - // 假设短查询是实体名 - intent.entities.other.push(query.to_string()); - } - - // 提取关键词 - intent.keywords = query - .split_whitespace() - .filter(|w| w.len() > 1) - .map(|s| s.to_lowercase()) - .take(5) - .collect(); - - intent - } -} - -// LLM 响应结构 -#[derive(Debug, Deserialize)] -struct LLMIntentResponse { - #[serde(rename = "query_type")] - query_type: QueryType, - entities: ExtractedEntities, - time_constraint: Option, - keywords: Vec, - rewritten_query: Option, - retrieval_strategy: StrategyResponse, - confidence: f32, -} - -#[derive(Debug, Deserialize)] -struct StrategyResponse { - #[serde(rename = "type")] - type_: String, - params: serde_json::Map, -} -``` - -### 2.3 L0 抽象层实体保留增强 - -#### 2.3.1 改进 L0 生成 Prompt - -```rust -// 文件: 
cortex-mem-core/src/llm/prompts.rs (修改) - -impl Prompts { - /// Prompt for generating L0 abstract with entity preservation - pub fn abstract_generation_with_entities(content: &str, known_entities: &[String]) -> String { - let entities_hint = if known_entities.is_empty() { - String::new() - } else { - format!( - "\n**已知实体(必须保留)**:{}\n", - known_entities.join(", ") - ) - }; - - format!( - r#"生成一个精炼的摘要(约100个token),用于快速相关性检测。 - -## 要求 -1. **实体优先**:必须包含内容中提到的所有重要实体(人名、项目名、技术术语等) -2. **信息广度**:覆盖多个关键方面,而非深入单一主题 -3. **紧凑表达**:使用"讨论了X、Y和Z"而非冗长描述 -4. **语言一致**:使用与输入内容相同的语言 -{} -## 内容 -{} - -## 摘要(约100 token)"#, - entities_hint, content - ) - } - - /// 从 L1 概览中提取实体列表 - pub fn entity_extraction_from_overview(overview: &str) -> String { - format!( - r#"从以下概览中提取所有命名实体。 - -## 输出格式(JSON) -{{ - "people": ["人名"], - "organizations": ["组织名"], - "projects": ["项目名"], - "technologies": ["技术术语"], - "locations": ["地点"], - "other": ["其他实体"] -}} - -## 概览内容 -{} - -## 实体列表(JSON)"#, - overview - ) - } -} -``` - -#### 2.3.2 层级生成器改进 - -```rust -// 文件: cortex-mem-core/src/layers/generator.rs (修改) - -impl AbstractGenerator { - /// 生成 L0 抽象(带实体保留) - pub async fn generate_with_entities( - &self, - content: &str, - llm: &Arc, - existing_entities: Option<&[String]>, - ) -> Result { - info!("Generating L0 Abstract with entity preservation"); - - // 1. 如果提供了已有实体,使用增强 prompt - let prompt = if let Some(entities) = existing_entities { - Prompts::abstract_generation_with_entities(content, entities) - } else { - Prompts::abstract_generation(content) - }; - - // 2. 调用 LLM - let abstract_text = llm.complete_with_system( - "你是专业的摘要生成器,擅长在有限篇幅内保留关键实体信息。", - &prompt - ).await?; - - // 3. 
从摘要中提取实体(用于后续索引) - let entities = self.extract_entities_from_abstract(&abstract_text, llm).await?; - - Ok(AbstractResult { - content: abstract_text, - entities, - token_count: Self::estimate_tokens(&abstract_text), - }) - } - - async fn extract_entities_from_abstract( - &self, - abstract_text: &str, - llm: &Arc, - ) -> Result> { - let prompt = format!( - r#"从以下摘要中提取所有命名实体(人名、组织、项目、技术术语等)。 - -摘要: -{} - -输出格式(JSON数组): -["实体1", "实体2", ...]"#, - abstract_text - ); - - let response = llm.complete(&prompt).await?; - - // 解析 JSON 数组 - let json_str = response.trim() - .trim_start_matches('[') - .trim_end_matches(']'); - - if json_str.is_empty() { - return Ok(Vec::new()); - } - - serde_json::from_str(&format!("[{}]", json_str)) - .map_err(|e| crate::Error::Llm(format!("Failed to parse entities: {}", e))) - } -} - -#[derive(Debug, Clone)] -pub struct AbstractResult { - pub content: String, - pub entities: Vec, - pub token_count: usize, -} -``` - -### 2.4 实体感知检索策略 - -#### 2.4.1 混合检索执行器 - -```rust -// 文件: cortex-mem-core/src/search/retrieval_executor.rs (新建) - -use crate::search::query_intent::*; -use crate::search::{VectorSearchEngine, SearchOptions, SearchResult}; -use crate::vector_store::QdrantVectorStore; -use crate::Result; -use std::sync::Arc; -use tracing::{info, debug}; - -/// 检索执行器 -pub struct RetrievalExecutor { - vector_engine: Arc, - vector_store: Arc, -} - -impl RetrievalExecutor { - pub fn new( - vector_engine: Arc, - vector_store: Arc, - ) -> Self { - Self { - vector_engine, - vector_store, - } - } - - /// 根据策略执行检索 - pub async fn execute( - &self, - query: &str, - intent: &EnhancedQueryIntent, - options: &SearchOptions, - ) -> Result> { - info!("Executing retrieval with strategy: {:?}", intent.retrieval_strategy); - - match &intent.retrieval_strategy { - RetrievalStrategy::EntityFirst => { - self.entity_first_search(query, intent, options).await - } - RetrievalStrategy::TimeFiltered => { - self.time_filtered_search(query, intent, options).await - } - 
RetrievalStrategy::FullSemantic => { - self.full_semantic_search(query, intent, options).await - } - RetrievalStrategy::Hybrid { entity_weight, semantic_weight, time_weight } => { - self.hybrid_search(query, intent, options, *entity_weight, *semantic_weight, *time_weight).await - } - RetrievalStrategy::LayeredCascade => { - // 默认三层级联检索 - self.vector_engine.layered_semantic_search(query, options).await - } - } - } - - /// 实体优先检索 - async fn entity_first_search( - &self, - query: &str, - intent: &EnhancedQueryIntent, - options: &SearchOptions, - ) -> Result> { - let all_entities = intent.entities.all_entities(); - - if all_entities.is_empty() { - // 没有实体,回退到语义搜索 - return self.vector_engine.semantic_search(query, options).await; - } - - debug!("Entity-first search for entities: {:?}", all_entities); - - // 1. 先用实体过滤检索 - let entity_filters = self.build_entity_filters(&intent.entities); - let entity_results = self.vector_store - .search_by_entities(&entity_filters, options.limit * 2) - .await?; - - // 2. 再用语义相似度排序 - let query_embedding = self.vector_engine.get_embedding(query).await?; - - let mut scored_results: Vec<_> = entity_results - .into_iter() - .map(|mem| { - let semantic_score = cosine_similarity(&query_embedding, &mem.embedding); - let entity_match_score = self.calculate_entity_match_score(&mem, &intent.entities); - - // 综合评分:实体匹配 40% + 语义相似 60% - let combined = entity_match_score * 0.4 + semantic_score * 0.6; - - SearchResult { - uri: mem.metadata.uri.unwrap_or(mem.id), - score: combined, - snippet: extract_snippet(&mem.content, query), - content: Some(mem.content), - } - }) - .collect(); - - // 3. 
排序并返回 - scored_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); - scored_results.truncate(options.limit); - - Ok(scored_results) - } - - /// 时间过滤检索 - async fn time_filtered_search( - &self, - query: &str, - intent: &EnhancedQueryIntent, - options: &SearchOptions, - ) -> Result> { - let time_filter = match &intent.time_constraint { - Some(tc) => tc, - None => return self.vector_engine.layered_semantic_search(query, options).await, - }; - - debug!("Time-filtered search for: {:?}", time_filter); - - // 构建时间范围过滤器 - let mut filters = crate::types::Filters::default(); - - if let Some(start) = time_filter.start { - filters.created_after = Some(start); - } - if let Some(end) = time_filter.end { - filters.created_before = Some(end); - } - - // 执行带时间过滤的搜索 - self.vector_engine.filtered_semantic_search(query, &filters, options).await - } - - /// 混合检索 - async fn hybrid_search( - &self, - query: &str, - intent: &EnhancedQueryIntent, - options: &SearchOptions, - entity_weight: f32, - semantic_weight: f32, - time_weight: f32, - ) -> Result> { - let mut candidates: std::collections::HashMap = - std::collections::HashMap::new(); - - // 1. 实体检索分支 - if entity_weight > 0.0 && !intent.entities.all_entities().is_empty() { - let entity_results = self.entity_first_search(query, intent, &SearchOptions { - limit: options.limit * 2, - ..options.clone() - }).await?; - - for result in entity_results { - let entry = candidates.entry(result.uri.clone()).or_default(); - entry.uri = result.uri.clone(); - entry.entity_score = Some(result.score); - entry.content = result.content.clone(); - } - } - - // 2. 
语义检索分支 - if semantic_weight > 0.0 { - let semantic_results = self.vector_engine.layered_semantic_search(query, &SearchOptions { - limit: options.limit * 2, - ..options.clone() - }).await?; - - for result in semantic_results { - let entry = candidates.entry(result.uri.clone()).or_default(); - entry.uri = result.uri.clone(); - entry.semantic_score = Some(result.score); - entry.content = result.content.clone(); - } - } - - // 3. 时间过滤分支 - if time_weight > 0.0 && intent.time_constraint.is_some() { - let time_results = self.time_filtered_search(query, intent, &SearchOptions { - limit: options.limit * 2, - ..options.clone() - }).await?; - - for result in time_results { - let entry = candidates.entry(result.uri.clone()).or_default(); - entry.uri = result.uri.clone(); - entry.time_score = Some(result.score); - entry.content = result.content.clone(); - } - } - - // 4. 综合评分 - let mut final_results: Vec<_> = candidates - .into_values() - .map(|c| { - let entity = c.entity_score.unwrap_or(0.0) * entity_weight; - let semantic = c.semantic_score.unwrap_or(0.0) * semantic_weight; - let time = c.time_score.unwrap_or(0.0) * time_weight; - let total = entity + semantic + time; - - SearchResult { - uri: c.uri, - score: total, - snippet: String::new(), - content: c.content, - } - }) - .filter(|r| r.score >= options.threshold) - .collect(); - - final_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); - final_results.truncate(options.limit); - - Ok(final_results) - } - - fn build_entity_filters(&self, entities: &ExtractedEntities) -> EntityFilters { - EntityFilters { - people: entities.people.clone(), - organizations: entities.organizations.clone(), - projects: entities.projects.clone(), - technologies: entities.technologies.clone(), - } - } - - fn calculate_entity_match_score(&self, memory: &crate::types::Memory, entities: &ExtractedEntities) -> f32 { - let metadata_entities = &memory.metadata.entities; - let query_entities = entities.all_entities(); - - if 
query_entities.is_empty() { - return 0.5; - } - - let matches = query_entities - .iter() - .filter(|qe| metadata_entities.iter().any(|me| me.to_lowercase() == qe.to_lowercase())) - .count(); - - matches as f32 / query_entities.len() as f32 - } -} - -#[derive(Default)] -struct HybridCandidate { - uri: String, - entity_score: Option, - semantic_score: Option, - time_score: Option, - content: Option, -} - -fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 { - let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum(); - let mag_a: f32 = a.iter().map(|x| x * x).sum::().sqrt(); - let mag_b: f32 = b.iter().map(|x| x * x).sum::().sqrt(); - if mag_a == 0.0 || mag_b == 0.0 { 0.0 } else { dot / (mag_a * mag_b) } -} - -fn extract_snippet(content: &str, query: &str) -> String { - // 复用现有的 snippet 提取逻辑 - if content.chars().count() > 200 { - format!("{}...", content.chars().take(200).collect::()) - } else { - content.to_string() - } -} -``` - -### 2.5 集成到现有搜索引擎 - -```rust -// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) - -impl VectorSearchEngine { - /// 智能检索入口(使用 Intent 分析) - pub async fn smart_search( - &self, - query: &str, - options: &SearchOptions, - intent_analyzer: Option<&IntentAnalyzer>, - ) -> Result> { - // 1. Intent 分析 - let intent = if let Some(analyzer) = intent_analyzer { - analyzer.analyze(query).await? - } else { - // 回退到旧的简单分析 - self.simple_intent_analysis(query).await - }; - - info!( - "Smart search intent: type={:?}, strategy={:?}, entities={}", - intent.query_type, - intent.retrieval_strategy, - intent.entities.all_entities().len() - ); - - // 2. 根据策略选择检索路径 - let search_query = intent.rewritten_query.as_deref().unwrap_or(query); - - // 3. 
执行检索 - match &intent.retrieval_strategy { - RetrievalStrategy::EntityFirst => { - // 使用实体优先检索 - self.entity_aware_search(search_query, &intent, options).await - } - _ => { - // 使用增强的三层级联检索 - self.enhanced_layered_search(search_query, &intent, options).await - } - } - } - - /// 实体感知的层级检索 - async fn enhanced_layered_search( - &self, - query: &str, - intent: &EnhancedQueryIntent, - options: &SearchOptions, - ) -> Result> { - // 调用现有的 layered_semantic_search,但使用 intent 中的信息 - let mut enhanced_options = options.clone(); - - // 根据意图类型调整阈值 - enhanced_options.threshold = match intent.query_type { - QueryType::EntityLookup => 0.3, // 实体查询降低阈值 - QueryType::Temporal => 0.35, - _ => options.threshold, - }; - - self.layered_semantic_search(query, &enhanced_options).await - } -} -``` - -## 3. 配置支持 - -```toml -# config.toml 新增配置 - -[search] -# Intent 分析模式 -intent_analysis_mode = "llm" # llm, rules, hybrid - -# Intent 分析缓存 -intent_cache_size = 100 - -# 实体检索权重 -[search.entity_retrieval] -enabled = true -entity_match_weight = 0.4 -semantic_weight = 0.6 - -# L0 实体保留 -[search.l0_entity_preservation] -enabled = true -min_entities = 3 -``` - -## 4. 实现计划 - -| 步骤 | 任务 | 依赖 | -|------|------|------| -| 1 | 实现 EnhancedQueryIntent 结构 | 无 | -| 2 | 实现 IntentAnalyzer(含 LLM 分析) | 步骤1 | -| 3 | 修改 L0 生成器,添加实体保留 | 无 | -| 4 | 实现 RetrievalExecutor | 步骤1, 2 | -| 5 | 集成到 VectorSearchEngine | 步骤2, 4 | -| 6 | 添加配置支持 | 全部 | - -## 5. 预期收益 - -| 场景 | 当前表现 | 优化后 | 提升 | -|------|----------|--------|------| -| "王明是谁" | 召回失败或低分 | 高召回率 | +60%+ | -| "上周讨论的技术方案" | 归类错误 | 准确识别+时间过滤 | +40% | -| 中文意图识别 | ~30% 准确 | ~85% 准确 | +55pp | -| LLM 开销 | 无 | 每次查询 1 次 | 可控 | - -## 6. 技术债务清理 - -1. **删除** `detect_intent_type` 方法中的硬编码关键词匹配 -2. **启用** 已存在但未使用的 `Prompts::intent_analysis` -3. **统一** Intent 分析逻辑,移除重复代码 -4. 
**添加** 完善的单元测试和集成测试 diff --git a/docs_plan_v2.6/3_memory_forgetting_mechanism.md b/docs_plan_v2.6/3_memory_forgetting_mechanism.md deleted file mode 100644 index cde500c..0000000 --- a/docs_plan_v2.6/3_memory_forgetting_mechanism.md +++ /dev/null @@ -1,863 +0,0 @@ -# 记忆遗忘机制设计技术方案 - -## 1. 问题分析 - -### 1.1 现有元数据未被充分利用 - -**位置**: `cortex-mem-core/src/memory_index.rs` - -当前 `MemoryMetadata` 已定义了访问统计字段,但未实现基于此的衰减或遗忘逻辑: - -```rust -pub struct MemoryMetadata { - pub last_accessed: DateTime, // 有字段 - pub access_count: u32, // 有字段 - pub confidence: f32, // 有字段 - // 缺少: - // - importance_decay (重要性衰减) - // - ttl (存活时间) - // - last_updated (内容更新时间) - // - memory_phase (记忆阶段) -} -``` - -### 1.2 长期运行的问题 - -对于持续数月的 Coding Agent(如 Cursor、Claude Code),会积累大量记忆: - -| 时间跨度 | 预估记忆量 | 问题 | -|----------|------------|------| -| 1 个月 | ~500 条 | 检索效率下降 | -| 3 个月 | ~1500 条 | 召回精度下降,噪声增加 | -| 6 个月 | ~3000 条 | 存储膨胀,过时信息干扰 | -| 1 年 | ~6000 条 | 系统性能显著下降 | - -### 1.3 过时信息的危害 - -1. **召回噪声**:已废弃的项目配置、过时的技术决策干扰当前查询 -2. **存储浪费**:无价值信息占用向量索引空间 -3. **上下文污染**:Agent 可能基于过时信息做出错误决策 - -## 2. 理论基础 - -### 2.1 艾宾浩斯遗忘曲线 - -记忆强度随时间自然衰减,呈现指数曲线: - -``` -记忆强度 = 初始强度 × e^(-t/τ) - -其中: -- t: 距上次访问的时间 -- τ: 时间常数(半衰期) -``` - -### 2.2 记忆巩固理论 - -频繁访问的记忆会被"巩固",衰减速度减慢: - -``` -衰减速度 ∝ 1 / log(1 + access_count) -``` - -### 2.3 工作记忆与长期记忆 - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ 记忆生命周期模型 │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ 新记忆 ──► 工作记忆 ──► 短期记忆 ──► 长期记忆 ──► 归档 │ -│ │ │ │ │ │ │ -│ │ │ │ │ │ │ -│ │ 高频访问 中频访问 低频访问 极少访问 │ -│ │ 快速衰减 中速衰减 慢速衰减 仅查询可见 │ -│ │ TTL: 7天 TTL: 30天 TTL: 90天 永久保留 │ -│ │ │ │ │ │ │ -│ └──────────┴────────────┴────────────┴────────────┘ │ -│ │ │ -│ ▼ │ -│ 自动遗忘清理 │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -## 3. 
解决方案 - -### 3.1 增强的记忆元数据 - -```rust -// 文件: cortex-mem-core/src/memory_index.rs (修改) - -use serde::{Deserialize, Serialize}; -use chrono::{DateTime, Utc}; - -/// 记忆阶段 -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] -pub enum MemoryPhase { - /// 工作记忆:当前会话频繁使用 - Working, - /// 短期记忆:近期使用,可能过渡到长期 - ShortTerm, - /// 长期记忆:已巩固,衰减慢 - LongTerm, - /// 归档记忆:极少访问,仅特定查询可见 - Archived, -} - -impl Default for MemoryPhase { - fn default() -> Self { - MemoryPhase::Working - } -} - -/// 遗忘策略配置 -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ForgettingConfig { - /// 工作记忆半衰期(天) - pub working_half_life: f32, - /// 短期记忆半衰期(天) - pub short_term_half_life: f32, - /// 长期记忆半衰期(天) - pub long_term_half_life: f32, - /// 巩固阈值:访问次数达到此值后进入下一阶段 - pub consolidation_threshold: u32, - /// 遗忘阈值:强度低于此值的记忆将被清理 - pub forgetting_threshold: f32, - /// 归档阈值:强度低于此值进入归档 - pub archive_threshold: f32, - /// 是否启用自动清理 - pub auto_cleanup_enabled: bool, - /// 清理周期(小时) - pub cleanup_interval_hours: u64, -} - -impl Default for ForgettingConfig { - fn default() -> Self { - Self { - working_half_life: 7.0, // 7 天半衰期 - short_term_half_life: 30.0, // 30 天半衰期 - long_term_half_life: 180.0, // 180 天半衰期 - consolidation_threshold: 5, // 访问 5 次后巩固 - forgetting_threshold: 0.1, // 强度低于 0.1 删除 - archive_threshold: 0.3, // 强度低于 0.3 归档 - auto_cleanup_enabled: true, - cleanup_interval_hours: 24, - } - } -} - -/// 增强的记忆元数据 -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct EnhancedMemoryMetadata { - // === 原有字段 === - pub id: String, - pub file: String, - pub memory_type: MemoryType, - pub key: String, - pub content_hash: String, - pub source_sessions: Vec, - pub created_at: DateTime, - pub updated_at: DateTime, - pub last_accessed: DateTime, - pub access_count: u32, - pub confidence: f32, - pub content_summary: String, - - // === 新增字段 === - /// 记忆阶段 - pub phase: MemoryPhase, - /// 重要性评分(用户可手动调整) - pub importance: f32, - /// 衰减后的当前强度 - pub current_strength: f32, - /// 最后强度计算时间 - pub 
strength_updated_at: DateTime, - /// 是否标记为重要(永不遗忘) - pub is_pinned: bool, - /// 自定义 TTL(秒),None 表示使用默认 - pub custom_ttl: Option, - /// 关联的其他记忆 ID - pub related_memories: Vec, - /// 标签(用于分类遗忘) - pub tags: Vec, - /// 内容过期时间(用于时序性信息) - pub expires_at: Option>, -} - -impl EnhancedMemoryMetadata { - /// 创建新记忆 - pub fn new( - id: String, - file: String, - memory_type: MemoryType, - key: String, - content_hash: String, - source_session: &str, - confidence: f32, - content_summary: String, - ) -> Self { - let now = Utc::now(); - Self { - id, - file, - memory_type, - key, - content_hash, - source_sessions: vec![source_session.to_string()], - created_at: now, - updated_at: now, - last_accessed: now, - access_count: 0, - confidence, - content_summary, - - // 新字段默认值 - phase: MemoryPhase::Working, - importance: 0.5, - current_strength: 1.0, - strength_updated_at: now, - is_pinned: false, - custom_ttl: None, - related_memories: Vec::new(), - tags: Vec::new(), - expires_at: None, - } - } - - /// 从旧元数据迁移 - pub fn from_legacy(legacy: MemoryMetadata) -> Self { - Self { - id: legacy.id, - file: legacy.file, - memory_type: legacy.memory_type, - key: legacy.key, - content_hash: legacy.content_hash, - source_sessions: legacy.source_sessions, - created_at: legacy.created_at, - updated_at: legacy.updated_at, - last_accessed: legacy.last_accessed, - access_count: legacy.access_count, - confidence: legacy.confidence, - content_summary: legacy.content_summary, - - phase: Self::infer_phase_from_access(legacy.access_count), - importance: legacy.confidence, - current_strength: 1.0, - strength_updated_at: Utc::now(), - is_pinned: false, - custom_ttl: None, - related_memories: Vec::new(), - tags: Vec::new(), - expires_at: None, - } - } - - fn infer_phase_from_access(access_count: u32) -> MemoryPhase { - match access_count { - 0..=2 => MemoryPhase::Working, - 3..=10 => MemoryPhase::ShortTerm, - _ => MemoryPhase::LongTerm, - } - } -} -``` - -### 3.2 遗忘计算引擎 - -```rust -// 文件: 
cortex-mem-core/src/forgetting/engine.rs (新建) - -use crate::memory_index::{EnhancedMemoryMetadata, MemoryPhase, ForgettingConfig}; -use chrono::{DateTime, Utc}; -use std::sync::Arc; - -/// 遗忘计算引擎 -pub struct ForgettingEngine { - config: ForgettingConfig, -} - -impl ForgettingEngine { - pub fn new(config: ForgettingConfig) -> Self { - Self { config } - } - - /// 计算记忆的当前强度 - /// - /// 强度 = 基础强度 × 衰减因子 × 巩固加成 - pub fn calculate_strength(&self, memory: &EnhancedMemoryMetadata) -> f32 { - if memory.is_pinned { - return 1.0; // 固定的记忆永不衰减 - } - - // 1. 计算时间衰减 - let days_since_access = (Utc::now() - memory.last_accessed).num_days() as f32; - let half_life = self.get_half_life(memory.phase); - let decay_factor = (-days_since_access / half_life).exp(); - - // 2. 计算巩固加成(访问次数越多,衰减越慢) - let consolidation_bonus = 1.0 + (memory.access_count as f32).ln().max(0.0) * 0.1; - - // 3. 计算重要性加成 - let importance_factor = memory.importance * 0.5 + 0.5; // 0.5 ~ 1.0 - - // 4. 综合强度 - let strength = memory.confidence * decay_factor * consolidation_bonus * importance_factor; - - // 5. 
检查是否过期 - if let Some(expires_at) = memory.expires_at { - if Utc::now() > expires_at { - return 0.0; // 已过期 - } - } - - strength.clamp(0.0, 1.0) - } - - /// 获取对应阶段的半衰期 - fn get_half_life(&self, phase: MemoryPhase) -> f32 { - match phase { - MemoryPhase::Working => self.config.working_half_life, - MemoryPhase::ShortTerm => self.config.short_term_half_life, - MemoryPhase::LongTerm => self.config.long_term_half_life, - MemoryPhase::Archived => self.config.long_term_half_life * 2.0, - } - } - - /// 判断记忆是否应该被遗忘 - pub fn should_forget(&self, memory: &EnhancedMemoryMetadata) -> bool { - if memory.is_pinned { - return false; - } - - let strength = self.calculate_strength(memory); - strength < self.config.forgetting_threshold - } - - /// 判断记忆是否应该归档 - pub fn should_archive(&self, memory: &EnhancedMemoryMetadata) -> bool { - if memory.is_pinned || memory.phase == MemoryPhase::Archived { - return false; - } - - let strength = self.calculate_strength(memory); - strength < self.config.archive_threshold && strength >= self.config.forgetting_threshold - } - - /// 判断记忆是否应该巩固(提升阶段) - pub fn should_consolidate(&self, memory: &EnhancedMemoryMetadata) -> Option { - if memory.access_count >= self.config.consolidation_threshold { - match memory.phase { - MemoryPhase::Working => Some(MemoryPhase::ShortTerm), - MemoryPhase::ShortTerm => Some(MemoryPhase::LongTerm), - MemoryPhase::LongTerm | MemoryPhase::Archived => None, - } - } else { - None - } - } - - /// 更新记忆的强度和阶段 - pub fn update_memory_state(&self, memory: &mut EnhancedMemoryMetadata) -> MemoryStateChange { - let mut changes = MemoryStateChange::default(); - - // 计算新强度 - let new_strength = self.calculate_strength(memory); - let old_strength = memory.current_strength; - memory.current_strength = new_strength; - memory.strength_updated_at = Utc::now(); - - if (new_strength - old_strength).abs() > 0.01 { - changes.strength_changed = true; - } - - // 检查巩固 - if let Some(new_phase) = self.should_consolidate(memory) { - memory.phase = 
new_phase; - changes.phase_changed = true; - changes.new_phase = Some(new_phase); - } - - // 检查归档 - if self.should_archive(memory) { - memory.phase = MemoryPhase::Archived; - changes.phase_changed = true; - changes.new_phase = Some(MemoryPhase::Archived); - changes.archived = true; - } - - // 检查遗忘 - changes.should_forget = self.should_forget(memory); - - changes - } -} - -#[derive(Debug, Clone, Default)] -pub struct MemoryStateChange { - pub strength_changed: bool, - pub phase_changed: bool, - pub new_phase: Option, - pub archived: bool, - pub should_forget: bool, -} - -/// 遗忘操作类型 -#[derive(Debug, Clone)] -pub enum ForgettingAction { - /// 删除记忆 - Delete { memory_id: String }, - /// 归档记忆 - Archive { memory_id: String }, - /// 巩固记忆(提升阶段) - Consolidate { memory_id: String, new_phase: MemoryPhase }, - /// 更新强度 - UpdateStrength { memory_id: String, new_strength: f32 }, - /// 无操作 - None, -} -``` - -### 3.3 记忆清理服务 - -```rust -// 文件: cortex-mem-core/src/forgetting/cleanup_service.rs (新建) - -use crate::forgetting::{ForgettingEngine, ForgettingAction, ForgettingConfig}; -use crate::memory_index_manager::MemoryIndexManager; -use crate::memory_events::{MemoryEvent, ChangeType, DeleteReason}; -use crate::vector_store::QdrantVectorStore; -use crate::filesystem::{CortexFilesystem, FilesystemOperations}; -use crate::Result; -use std::sync::Arc; -use tokio::sync::mpsc; -use tracing::{info, warn, debug}; - -/// 记忆清理服务 -pub struct MemoryCleanupService { - engine: ForgettingEngine, - index_manager: Arc, - vector_store: Arc, - filesystem: Arc, - event_tx: mpsc::UnboundedSender, - config: ForgettingConfig, -} - -/// 清理统计 -#[derive(Debug, Clone, Default)] -pub struct CleanupStats { - pub total_scanned: usize, - pub deleted: usize, - pub archived: usize, - pub consolidated: usize, - pub strength_updated: usize, - pub errors: usize, -} - -impl MemoryCleanupService { - pub fn new( - config: ForgettingConfig, - index_manager: Arc, - vector_store: Arc, - filesystem: Arc, - event_tx: 
mpsc::UnboundedSender, - ) -> Self { - Self { - engine: ForgettingEngine::new(config.clone()), - index_manager, - vector_store, - filesystem, - event_tx, - config, - } - } - - /// 启动定期清理任务 - pub fn start_background_cleanup(self: Arc) -> tokio::task::JoinHandle<()> { - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(self.config.cleanup_interval_hours * 3600); - - loop { - tokio::time::sleep(interval).await; - - if !self.config.auto_cleanup_enabled { - continue; - } - - info!("Starting scheduled memory cleanup"); - - match self.run_cleanup().await { - Ok(stats) => { - info!( - "Cleanup completed: {} scanned, {} deleted, {} archived, {} consolidated", - stats.total_scanned, stats.deleted, stats.archived, stats.consolidated - ); - } - Err(e) => { - warn!("Cleanup failed: {}", e); - } - } - } - }) - } - - /// 执行一次清理 - pub async fn run_cleanup(&self) -> Result { - let mut stats = CleanupStats::default(); - - // 1. 扫描所有维度的记忆 - let scopes = [ - (crate::memory_index::MemoryScope::User, "user"), - (crate::memory_index::MemoryScope::Agent, "agent"), - ]; - - for (scope, owner_prefix) in scopes { - let owners = self.index_manager.list_owners(&scope).await?; - - for owner_id in owners { - let owner_stats = self.cleanup_owner_memories(&scope, &owner_id).await?; - stats.total_scanned += owner_stats.total_scanned; - stats.deleted += owner_stats.deleted; - stats.archived += owner_stats.archived; - stats.consolidated += owner_stats.consolidated; - stats.strength_updated += owner_stats.strength_updated; - stats.errors += owner_stats.errors; - } - } - - Ok(stats) - } - - /// 清理特定所有者的记忆 - async fn cleanup_owner_memories( - &self, - scope: &crate::memory_index::MemoryScope, - owner_id: &str, - ) -> Result { - let mut stats = CleanupStats::default(); - - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - let memory_ids: Vec = index.memories.keys().cloned().collect(); - stats.total_scanned = 
memory_ids.len(); - - let mut to_delete = Vec::new(); - let mut to_archive = Vec::new(); - let mut to_consolidate = Vec::new(); - - for memory_id in &memory_ids { - if let Some(memory) = index.memories.get_mut(memory_id) { - let change = self.engine.update_memory_state(memory); - - if change.should_forget { - to_delete.push(memory_id.clone()); - } else if change.archived { - to_archive.push(memory_id.clone()); - } else if let Some(new_phase) = change.new_phase { - to_consolidate.push((memory_id.clone(), new_phase)); - } - - if change.strength_changed { - stats.strength_updated += 1; - } - } - } - - // 执行删除 - for memory_id in &to_delete { - if let Some(memory) = index.memories.remove(memory_id) { - self.delete_memory_files(&memory.file).await?; - self.emit_delete_event(memory_id, &memory.file).await; - stats.deleted += 1; - } - } - - // 归档已在 update_memory_state 中更新了 phase - stats.archived = to_archive.len(); - - // 巩固计数 - stats.consolidated = to_consolidate.len(); - - // 保存更新后的索引 - self.index_manager.save_index(scope, owner_id, &index).await?; - - Ok(stats) - } - - async fn delete_memory_files(&self, file_path: &str) -> Result<()> { - // 从文件系统删除 - let uri = format!("cortex://user/{}", file_path); - if self.filesystem.exists(&uri).await? 
{ - self.filesystem.delete(&uri).await?; - } - - // 从向量存储删除 - self.vector_store.delete_by_uri(&uri).await?; - - Ok(()) - } - - async fn emit_delete_event(&self, memory_id: &str, file_path: &str) { - let event = MemoryEvent::Deleted { - memory_id: memory_id.to_string(), - uri: format!("cortex://user/{}", file_path), - reason: DeleteReason::Expired, - timestamp: chrono::Utc::now(), - }; - - let _ = self.event_tx.send(event); - } -} - -/// 手动遗忘操作 -impl MemoryCleanupService { - /// 标记记忆为重要(永不遗忘) - pub async fn pin_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - if let Some(memory) = index.memories.get_mut(memory_id) { - memory.is_pinned = true; - memory.current_strength = 1.0; - } - - self.index_manager.save_index(scope, owner_id, &index).await?; - Ok(()) - } - - /// 取消固定 - pub async fn unpin_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - if let Some(memory) = index.memories.get_mut(memory_id) { - memory.is_pinned = false; - } - - self.index_manager.save_index(scope, owner_id, &index).await?; - Ok(()) - } - - /// 手动删除记忆 - pub async fn delete_memory(&self, scope: &crate::memory_index::MemoryScope, owner_id: &str, memory_id: &str) -> Result<()> { - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - if let Some(memory) = index.memories.remove(memory_id) { - self.delete_memory_files(&memory.file).await?; - self.emit_delete_event(memory_id, &memory.file).await; - } - - self.index_manager.save_index(scope, owner_id, &index).await?; - Ok(()) - } - - /// 设置记忆过期时间 - pub async fn set_memory_expiry( - &self, - scope: &crate::memory_index::MemoryScope, - owner_id: 
&str, - memory_id: &str, - expires_at: Option>, - ) -> Result<()> { - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - if let Some(memory) = index.memories.get_mut(memory_id) { - memory.expires_at = expires_at; - } - - self.index_manager.save_index(scope, owner_id, &index).await?; - Ok(()) - } - - /// 手动调整记忆重要性 - pub async fn set_memory_importance( - &self, - scope: &crate::memory_index::MemoryScope, - owner_id: &str, - memory_id: &str, - importance: f32, - ) -> Result<()> { - let index = self.index_manager.get_or_create_index(scope, owner_id).await?; - let mut index = index.write().await; - - if let Some(memory) = index.memories.get_mut(memory_id) { - memory.importance = importance.clamp(0.0, 1.0); - } - - self.index_manager.save_index(scope, owner_id, &index).await?; - Ok(()) - } -} -``` - -### 3.4 检索时考虑记忆强度 - -```rust -// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) - -impl VectorSearchEngine { - /// 在检索结果中加入记忆强度因子 - async fn apply_forgetting_factor(&self, results: &mut [SearchResult]) { - for result in results.iter_mut() { - // 从 metadata 获取记忆强度 - if let Some(strength) = self.get_memory_strength(&result.uri).await { - // 调整分数:原始分数 × 强度因子 - // 使用幂函数使高强度记忆保持高分,低强度记忆显著降权 - let strength_factor = strength.powf(0.5); // 平方根减缓衰减 - result.score *= strength_factor; - } - } - } - - async fn get_memory_strength(&self, uri: &str) -> Option { - // 从索引中获取记忆强度 - // 实现取决于具体的索引查询方式 - None - } -} -``` - -### 3.5 配置支持 - -```toml -# config.toml 新增配置 - -[forgetting] -# 是否启用遗忘机制 -enabled = true - -# 是否启用自动清理 -auto_cleanup = true - -# 清理周期(小时) -cleanup_interval_hours = 24 - -# 半衰期配置(天) -working_half_life = 7 -short_term_half_life = 30 -long_term_half_life = 180 - -# 巩固阈值:访问次数达到此值后进入下一阶段 -consolidation_threshold = 5 - -# 遗忘阈值:强度低于此值的记忆将被清理 -forgetting_threshold = 0.1 - -# 归档阈值:强度低于此值进入归档 -archive_threshold = 0.3 - -# 归档记忆是否参与检索 -include_archived_in_search = false -``` - -### 3.6 CLI 命令扩展 - -```rust -// 
cortex-mem-cli 新增命令 - -// 记忆管理命令 -cortex-mem-cli memory pin // 标记为重要 -cortex-mem-cli memory unpin // 取消标记 -cortex-mem-cli memory expire --days 30 // 设置过期时间 -cortex-mem-cli memory importance --value 0.9 // 设置重要性 - -// 遗忘管理命令 -cortex-mem-cli forgetting status // 查看遗忘状态统计 -cortex-mem-cli forgetting preview // 预览将要被清理的记忆 -cortex-mem-cli forgetting run // 手动触发清理 -cortex-mem-cli forgetting config // 查看当前配置 -``` - -## 4. 数据迁移方案 - -### 4.1 从现有元数据迁移 - -```rust -// 文件: cortex-mem-core/src/migration/memory_metadata.rs (新建) - -/// 迁移现有的 MemoryMetadata 到 EnhancedMemoryMetadata -pub async fn migrate_memory_indices(data_dir: &Path) -> Result { - let mut stats = MigrationStats::default(); - - // 扫描所有 .memory_index.json 文件 - for entry in walkdir::WalkDir::new(data_dir) - .into_iter() - .filter_map(|e| e.ok()) - .filter(|e| e.file_name() == ".memory_index.json") - { - let path = entry.path(); - match migrate_single_index(path).await { - Ok(s) => stats.merge(s), - Err(e) => { - warn!("Failed to migrate {}: {}", path.display(), e); - stats.errors += 1; - } - } - } - - Ok(stats) -} - -async fn migrate_single_index(path: &Path) -> Result { - // 1. 读取旧格式 - let content = tokio::fs::read_to_string(path).await?; - let old_index: MemoryIndex = serde_json::from_str(&content)?; - - // 2. 转换为新格式 - let new_index = EnhancedMemoryIndex { - version: 2, - scope: old_index.scope, - owner_id: old_index.owner_id, - last_updated: Utc::now(), - memories: old_index.memories - .into_iter() - .map(|(k, v)| (k, EnhancedMemoryMetadata::from_legacy(v))) - .collect(), - session_summaries: old_index.session_summaries, - }; - - // 3. 备份旧文件 - let backup_path = path.with_extension("json.bak"); - tokio::fs::copy(path, &backup_path).await?; - - // 4. 写入新格式 - let new_content = serde_json::to_string_pretty(&new_index)?; - tokio::fs::write(path, new_content).await?; - - Ok(MigrationStats { - migrated: new_index.memories.len(), - errors: 0, - }) -} -``` - -## 5. 
实现计划 - -| 步骤 | 任务 | 依赖 | -|------|------|------| -| 1 | 定义 EnhancedMemoryMetadata 和 ForgettingConfig | 无 | -| 2 | 实现 ForgettingEngine 核心算法 | 步骤1 | -| 3 | 实现 MemoryCleanupService | 步骤2 | -| 4 | 修改检索逻辑,考虑记忆强度 | 步骤1 | -| 5 | 添加 CLI 命令 | 步骤3 | -| 6 | 实现数据迁移工具 | 步骤1 | -| 7 | 添加配置支持 | 全部 | - -## 6. 预期收益 - -| 指标 | 优化前 | 优化后 | 提升 | -|------|--------|--------|------| -| 6个月记忆量 | ~3000 条 | ~1500 条 | 50% 减少 | -| 召回噪声率 | ~25% | ~8% | 17pp 下降 | -| 存储空间 | 100% | 60% | 40% 节省 | -| 检索延迟 | 100% | 70% | 30% 提升 | - -## 7. 注意事项 - -1. **默认保守**:首次部署时遗忘阈值设置较高,避免误删重要记忆 -2. **用户可控**:提供 pin/unpin 功能让用户手动保护重要记忆 -3. **审计日志**:记录所有遗忘操作,支持审计和恢复 -4. **灰度发布**:先在测试环境验证,再逐步推广 diff --git a/docs_plan_v2.6/4_dynamic_weight_allocation.md b/docs_plan_v2.6/4_dynamic_weight_allocation.md deleted file mode 100644 index 9e5a2d4..0000000 --- a/docs_plan_v2.6/4_dynamic_weight_allocation.md +++ /dev/null @@ -1,929 +0,0 @@ -# 动态权重分配技术方案 - -## 1. 问题分析 - -### 1.1 当前静态权重的问题 - -**位置**: `cortex-mem-core/src/search/vector_engine.rs:466-468` - -三层级联检索的权重是硬编码的: - -```rust -// Combined scoring: 0.2*L0 + 0.3*L1 + 0.5*L2 -let combined_score = l0_score * 0.2 + l1_score * 0.3 + l2_score * 0.5; -``` - -**问题分析**: - -1. **无法适应不同查询类型**: - - 实体查询:"王明是谁" → 需要 L2 精确匹配,当前权重合理 - - 语义查询:"讨论了什么技术方案" → L0/L1 更重要,权重应调整 - - 时间查询:"上周的决策" → 需要结合时间信息,权重需动态调整 - -2. **无法适应不同记忆类型**: - - Preference(偏好):L0 抽象通常已包含关键信息 - - Event(事件):L2 细节更重要 - - Entity(实体):需要 L2 精确匹配 - -3. **与 Intent 分析割裂**:虽然有 Intent 分析,但未影响权重分配 - -### 1.2 现有降级策略的局限 - -当 L0 搜索失败时: - -```rust -// 策略1: 降低阈值重试 -let relaxed_threshold = (adaptive_threshold - 0.2).max(0.4); - -// 策略2: 完全降级到语义搜索 -return self.semantic_search(query, options).await; -``` - -**问题**:降级策略是"全有或全无",无法细粒度调整权重。 - -## 2. 
解决方案 - -### 2.1 架构设计 - -``` -┌─────────────────────────────────────────────────────────────────────────────┐ -│ 动态权重分配架构 │ -├─────────────────────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ 查询意图 │───►│ 上下文 │───►│ 权重计算 │───►│ 检索执行 │ │ -│ │ 分析结果 │ │ 特征提取 │ │ 引擎 │ │ │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ └─────────────┘ │ -│ │ │ │ -│ │ │ │ -│ ▼ ▼ │ -│ ┌──────────────────────────────────────────┐ │ -│ │ 权重模板库 + 自适应学习 │ │ -│ │ ┌─────────┐ ┌─────────┐ ┌─────────┐ │ │ -│ │ │预定义模板│ │历史反馈 │ │实时调整 │ │ │ -│ │ └─────────┘ └─────────┘ └─────────┘ │ │ -│ └──────────────────────────────────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────────────────────┘ -``` - -### 2.2 权重分配模型 - -#### 2.2.1 权重计算输入特征 - -```rust -// 文件: cortex-mem-core/src/search/weight_model.rs (新建) - -use serde::{Deserialize, Serialize}; - -/// 权重计算的输入特征 -#[derive(Debug, Clone)] -pub struct WeightFeatures { - /// 查询类型特征 - pub query_type: QueryTypeFeature, - /// 记忆类型特征 - pub memory_type: Option, - /// 查询长度特征 - pub query_length: QueryLengthFeature, - /// 时间约束特征 - pub time_constraint: bool, - /// 实体数量 - pub entity_count: usize, - /// 查询复杂度 - pub complexity: f32, - /// 用户历史偏好(如果有) - pub user_preference: Option, -} - -#[derive(Debug, Clone, Copy)] -pub enum QueryTypeFeature { - Factual, - EntityLookup, - Temporal, - Relational, - Procedural, - Conceptual, - Search, - Comparative, - General, -} - -#[derive(Debug, Clone, Copy)] -pub enum MemoryTypeFeature { - Preference, - Entity, - Event, - Case, - PersonalInfo, - WorkHistory, - Relationship, - Goal, - Conversation, -} - -#[derive(Debug, Clone, Copy)] -pub enum QueryLengthFeature { - /// 短查询(≤5词) - Short, - /// 中等查询(6-15词) - Medium, - /// 长查询(>15词) - Long, -} - -/// 用户层级偏好(从历史反馈学习) -#[derive(Debug, Clone, Default)] -pub struct LayerPreference { - /// L0 偏好得分 - pub l0_score: f32, - /// L1 偏好得分 - pub l1_score: f32, - /// L2 偏好得分 - 
pub l2_score: f32, -} -``` - -#### 2.2.2 权重分配策略 - -```rust -// 文件: cortex-mem-core/src/search/weight_model.rs (继续) - -/// 三层权重 -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -pub struct LayerWeights { - pub l0: f32, // L0 抽象层权重 - pub l1: f32, // L1 概览层权重 - pub l2: f32, // L2 细节层权重 -} - -impl LayerWeights { - /// 归一化权重(确保总和为 1.0) - pub fn normalize(&self) -> Self { - let total = self.l0 + self.l1 + self.l2; - if total == 0.0 { - return Self::default(); - } - Self { - l0: self.l0 / total, - l1: self.l1 / total, - l2: self.l2 / total, - } - } - - /// 默认权重 - pub fn default_weights() -> Self { - Self { l0: 0.2, l1: 0.3, l2: 0.5 } - } -} - -impl Default for LayerWeights { - fn default() -> Self { - Self::default_weights() - } -} - -/// 权重分配策略 -pub trait WeightStrategy: Send + Sync { - /// 计算权重 - fn calculate(&self, features: &WeightFeatures) -> LayerWeights; - - /// 策略名称 - fn name(&self) -> &str; -} - -/// 预定义模板策略 -pub struct TemplateWeightStrategy { - templates: Vec, -} - -/// 权重模板 -#[derive(Debug, Clone)] -pub struct WeightTemplate { - /// 模板名称 - pub name: String, - /// 适用条件 - pub condition: TemplateCondition, - /// 权重值 - pub weights: LayerWeights, - /// 优先级(越高越优先) - pub priority: u8, -} - -#[derive(Debug, Clone)] -pub enum TemplateCondition { - /// 查询类型匹配 - QueryType(QueryTypeFeature), - /// 记忆类型匹配 - MemoryType(MemoryTypeFeature), - /// 查询长度匹配 - QueryLength(QueryLengthFeature), - /// 实体查询 - HasEntities, - /// 时间约束 - HasTimeConstraint, - /// 复合条件 - And(Box, Box), - Or(Box, Box), -} - -impl TemplateCondition { - pub fn matches(&self, features: &WeightFeatures) -> bool { - match self { - Self::QueryType(qt) => features.query_type == *qt, - Self::MemoryType(mt) => features.memory_type == Some(*mt), - Self::QueryLength(ql) => features.query_length == *ql, - Self::HasEntities => features.entity_count > 0, - Self::HasTimeConstraint => features.time_constraint, - Self::And(left, right) => left.matches(features) && right.matches(features), - Self::Or(left, right) => 
left.matches(features) || right.matches(features), - } - } -} - -impl TemplateWeightStrategy { - pub fn new() -> Self { - Self { - templates: Self::default_templates(), - } - } - - /// 默认模板库 - fn default_templates() -> Vec { - vec![ - // === 查询类型相关模板 === - - // 实体查询:L2 权重最高(需要精确匹配) - WeightTemplate { - name: "entity_lookup".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::EntityLookup), - weights: LayerWeights { l0: 0.1, l1: 0.2, l2: 0.7 }, - priority: 100, - }, - - // 事实查询:L2 权重较高 - WeightTemplate { - name: "factual".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Factual), - weights: LayerWeights { l0: 0.15, l1: 0.25, l2: 0.6 }, - priority: 90, - }, - - // 时间查询:均衡分布 - WeightTemplate { - name: "temporal".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Temporal), - weights: LayerWeights { l0: 0.2, l1: 0.35, l2: 0.45 }, - priority: 90, - }, - - // 关系查询:L1 权重最高(概览层包含关系信息) - WeightTemplate { - name: "relational".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Relational), - weights: LayerWeights { l0: 0.2, l1: 0.5, l2: 0.3 }, - priority: 90, - }, - - // 过程查询:L2 权重高(需要详细步骤) - WeightTemplate { - name: "procedural".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Procedural), - weights: LayerWeights { l0: 0.1, l1: 0.3, l2: 0.6 }, - priority: 90, - }, - - // 概念查询:L1 权重高(概览层包含概念解释) - WeightTemplate { - name: "conceptual".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Conceptual), - weights: LayerWeights { l0: 0.25, l1: 0.45, l2: 0.3 }, - priority: 90, - }, - - // 搜索查询:L0 权重高(快速定位) - WeightTemplate { - name: "search".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Search), - weights: LayerWeights { l0: 0.4, l1: 0.35, l2: 0.25 }, - priority: 90, - }, - - // 比较查询:L1 权重高 - WeightTemplate { - name: "comparative".to_string(), - condition: TemplateCondition::QueryType(QueryTypeFeature::Comparative), - 
weights: LayerWeights { l0: 0.2, l1: 0.5, l2: 0.3 }, - priority: 90, - }, - - // === 记忆类型相关模板 === - - // 偏好记忆:L0 足够(偏好通常简洁) - WeightTemplate { - name: "preference_memory".to_string(), - condition: TemplateCondition::MemoryType(MemoryTypeFeature::Preference), - weights: LayerWeights { l0: 0.4, l1: 0.4, l2: 0.2 }, - priority: 80, - }, - - // 事件记忆:L2 重要(需要详细上下文) - WeightTemplate { - name: "event_memory".to_string(), - condition: TemplateCondition::MemoryType(MemoryTypeFeature::Event), - weights: LayerWeights { l0: 0.15, l1: 0.25, l2: 0.6 }, - priority: 80, - }, - - // 案例记忆:L1/L2 重要(需要结构和细节) - WeightTemplate { - name: "case_memory".to_string(), - condition: TemplateCondition::MemoryType(MemoryTypeFeature::Case), - weights: LayerWeights { l0: 0.15, l1: 0.4, l2: 0.45 }, - priority: 80, - }, - - // === 查询长度相关模板 === - - // 短查询:L0 权重高(快速匹配) - WeightTemplate { - name: "short_query".to_string(), - condition: TemplateCondition::QueryLength(QueryLengthFeature::Short), - weights: LayerWeights { l0: 0.35, l1: 0.35, l2: 0.3 }, - priority: 70, - }, - - // 长查询:L2 权重高(需要深度匹配) - WeightTemplate { - name: "long_query".to_string(), - condition: TemplateCondition::QueryLength(QueryLengthFeature::Long), - weights: LayerWeights { l0: 0.15, l1: 0.3, l2: 0.55 }, - priority: 70, - }, - - // === 复合条件模板 === - - // 实体查询 + 短查询:极端偏向 L2 - WeightTemplate { - name: "entity_short".to_string(), - condition: TemplateCondition::And( - Box::new(TemplateCondition::QueryType(QueryTypeFeature::EntityLookup)), - Box::new(TemplateCondition::QueryLength(QueryLengthFeature::Short)) - ), - weights: LayerWeights { l0: 0.05, l1: 0.15, l2: 0.8 }, - priority: 110, - }, - - // 时间约束查询 - WeightTemplate { - name: "time_constrained".to_string(), - condition: TemplateCondition::HasTimeConstraint, - weights: LayerWeights { l0: 0.2, l1: 0.35, l2: 0.45 }, - priority: 85, - }, - ] - } -} - -impl WeightStrategy for TemplateWeightStrategy { - fn calculate(&self, features: &WeightFeatures) -> LayerWeights { - // 按优先级排序,找到第一个匹配的模板 
- let mut matched: Vec<_> = self.templates - .iter() - .filter(|t| t.condition.matches(features)) - .collect(); - - matched.sort_by(|a, b| b.priority.cmp(&a.priority)); - - if let Some(template) = matched.first() { - template.weights.normalize() - } else { - LayerWeights::default_weights() - } - } - - fn name(&self) -> &str { - "template" - } -} -``` - -#### 2.2.3 自适应学习策略 - -```rust -// 文件: cortex-mem-core/src/search/weight_model.rs (继续) - -use std::collections::HashMap; -use std::sync::Arc; -use tokio::sync::RwLock; - -/// 反馈记录 -#[derive(Debug, Clone)] -pub struct SearchFeedback { - /// 查询特征 - pub features: WeightFeatures, - /// 使用的权重 - pub weights: LayerWeights, - /// 用户是否点击了结果 - pub clicked: bool, - /// 点击结果的排名(1-based) - pub clicked_rank: Option, - /// 用户满意度评分(可选) - pub satisfaction: Option, -} - -/// 自适应权重学习策略 -pub struct AdaptiveWeightStrategy { - /// 基础策略 - base_strategy: Box, - /// 学习率 - learning_rate: f32, - /// 特征到权重调整的映射 - adjustments: Arc>>, - /// 历史反馈统计 - feedback_stats: Arc>, -} - -#[derive(Debug, Clone, Default)] -struct FeedbackStats { - total_feedbacks: usize, - positive_feedbacks: usize, - feature_click_rates: HashMap, // (clicks, total) -} - -impl AdaptiveWeightStrategy { - pub fn new(base_strategy: Box) -> Self { - Self { - base_strategy, - learning_rate: 0.1, - adjustments: Arc::new(RwLock::new(HashMap::new())), - feedback_stats: Arc::new(RwLock::new(FeedbackStats::default())), - } - } - - /// 记录反馈并学习 - pub async fn record_feedback(&self, feedback: SearchFeedback) { - let mut stats = self.feedback_stats.write().await; - stats.total_feedbacks += 1; - - if feedback.clicked { - stats.positive_feedbacks += 1; - - // 更新特征点击率 - let feature_key = Self::feature_key(&feedback.features); - let entry = stats.feature_click_rates.entry(feature_key.clone()).or_insert((0, 0)); - entry.0 += 1; - entry.1 += 1; - - // 学习:如果用户点击了结果,增加该权重的置信度 - // 这里可以更复杂的在线学习算法 - } - } - - fn feature_key(features: &WeightFeatures) -> String { - format!("{:?}_{:?}", 
features.query_type, features.query_length) - } -} - -impl WeightStrategy for AdaptiveWeightStrategy { - fn calculate(&self, features: &WeightFeatures) -> LayerWeights { - // 1. 获取基础权重 - let base_weights = self.base_strategy.calculate(features); - - // 2. 尝试同步获取调整(简化版本,实际应该 async) - // 由于 trait 方法是同步的,这里使用 try_read - if let Ok(adjustments) = self.adjustments.try_read() { - let feature_key = Self::feature_key(features); - if let Some(adjustment) = adjustments.get(&feature_key) { - // 融合基础权重和调整 - return LayerWeights { - l0: base_weights.l0 * (1.0 - self.learning_rate) + adjustment.l0 * self.learning_rate, - l1: base_weights.l1 * (1.0 - self.learning_rate) + adjustment.l1 * self.learning_rate, - l2: base_weights.l2 * (1.0 - self.learning_rate) + adjustment.l2 * self.learning_rate, - }.normalize(); - } - } - - base_weights - } - - fn name(&self) -> &str { - "adaptive" - } -} -``` - -### 2.3 动态权重检索执行器 - -```rust -// 文件: cortex-mem-core/src/search/dynamic_weight_executor.rs (新建) - -use crate::search::weight_model::*; -use crate::search::{VectorSearchEngine, SearchOptions, SearchResult, QueryIntent}; -use crate::embedding::EmbeddingClient; -use crate::vector_store::QdrantVectorStore; -use crate::filesystem::CortexFilesystem; -use crate::Result; -use std::sync::Arc; -use tracing::{info, debug}; - -/// 动态权重检索执行器 -pub struct DynamicWeightSearchExecutor { - vector_engine: Arc, - weight_strategy: Box, - embedding: Arc, -} - -impl DynamicWeightSearchExecutor { - pub fn new( - vector_engine: Arc, - embedding: Arc, - strategy: Box, - ) -> Self { - Self { - vector_engine, - weight_strategy: strategy, - embedding, - } - } - - /// 使用动态权重执行三层级联检索 - pub async fn search_with_dynamic_weights( - &self, - query: &str, - intent: &QueryIntent, - options: &SearchOptions, - ) -> Result> { - // 1. 提取权重特征 - let features = self.extract_features(query, intent, options).await?; - - // 2. 
计算动态权重 - let weights = self.weight_strategy.calculate(&features); - - info!( - "Dynamic weights for query '{}': L0={:.2}, L1={:.2}, L2={:.2}", - query, weights.l0, weights.l1, weights.l2 - ); - - // 3. 执行三层级联检索 - self.execute_with_weights(query, &weights, options).await - } - - /// 提取权重特征 - async fn extract_features( - &self, - query: &str, - intent: &QueryIntent, - options: &SearchOptions, - ) -> Result { - // 从 Intent 中提取特征 - let query_type = Self::map_intent_to_feature(&intent.intent_type); - - // 查询长度特征 - let word_count = query.split_whitespace().count(); - let query_length = match word_count { - 0..=5 => QueryLengthFeature::Short, - 6..=15 => QueryLengthFeature::Medium, - _ => QueryLengthFeature::Long, - }; - - // 计算复杂度(基于查询长度、关键词数量等) - let complexity = Self::calculate_complexity(query, &intent.keywords); - - Ok(WeightFeatures { - query_type, - memory_type: None, // 可从 options 或 filters 中获取 - query_length, - time_constraint: false, // 可从 intent 中判断 - entity_count: 0, // 可从 intent.entities 中获取 - complexity, - user_preference: None, - }) - } - - fn map_intent_to_feature(intent_type: &crate::search::QueryIntentType) -> QueryTypeFeature { - match intent_type { - crate::search::QueryIntentType::Factual => QueryTypeFeature::Factual, - crate::search::QueryIntentType::Search => QueryTypeFeature::Search, - crate::search::QueryIntentType::Relational => QueryTypeFeature::Relational, - crate::search::QueryIntentType::Temporal => QueryTypeFeature::Temporal, - crate::search::QueryIntentType::General => QueryTypeFeature::General, - } - } - - fn calculate_complexity(query: &str, keywords: &[String]) -> f32 { - let len_factor = (query.len() as f32 / 100.0).min(1.0); - let keyword_factor = (keywords.len() as f32 / 10.0).min(1.0); - (len_factor + keyword_factor) / 2.0 - } - - /// 执行带权重的三层检索 - async fn execute_with_weights( - &self, - query: &str, - weights: &LayerWeights, - options: &SearchOptions, - ) -> Result> { - // 生成查询嵌入 - let query_vec = 
self.embedding.embed(query).await?; - - // === Stage 1: L0 快速定位 === - let l0_threshold = self.calculate_adaptive_l0_threshold(weights.l0); - let l0_results = self.search_l0(&query_vec, options, l0_threshold).await?; - - if l0_results.is_empty() { - debug!("L0 results empty, fallback to full semantic search"); - return self.vector_engine.semantic_search(query, options).await; - } - - // === Stage 2: L1 深度探索 === - let l1_candidates = self.search_l1(&query_vec, &l0_results, options).await?; - - // === Stage 3: L2 精确匹配 === - let final_results = self.search_l2_with_weights( - &query_vec, - &l0_results, - &l1_candidates, - weights, - options - ).await?; - - Ok(final_results) - } - - fn calculate_adaptive_l0_threshold(&self, l0_weight: f32) -> f32 { - // L0 权重越高,阈值应该越低(更宽松地通过 L0) - // 基础阈值 0.5,根据权重调整 - let base = 0.5; - let adjustment = (l0_weight - 0.2) * 0.3; // 权重差异的影响 - (base - adjustment).clamp(0.3, 0.6) - } - - async fn search_l0( - &self, - query_vec: &[f32], - options: &SearchOptions, - threshold: f32, - ) -> Result> { - // 调用现有的 L0 搜索逻辑 - // 实际实现需要访问 vector_store - Ok(Vec::new()) - } - - async fn search_l1( - &self, - query_vec: &[f32], - l0_results: &[crate::types::ScoredMemory], - options: &SearchOptions, - ) -> Result> { - // L1 搜索逻辑 - Ok(Vec::new()) - } - - async fn search_l2_with_weights( - &self, - query_vec: &[f32], - l0_results: &[crate::types::ScoredMemory], - l1_candidates: &[(String, f32)], - weights: &LayerWeights, - options: &SearchOptions, - ) -> Result> { - let mut results = Vec::new(); - - // 遍历候选,使用动态权重计算综合分数 - for (dir_uri, l0_score, l1_score) in self.combine_l0_l1(l0_results, l1_candidates) { - // 获取 L2 内容并计算分数 - if let Some(l2_result) = self.search_l2_for_dir(query_vec, &dir_uri).await? 
{ - // 使用动态权重计算综合分数 - let combined_score = - l0_score * weights.l0 + - l1_score * weights.l1 + - l2_result.score * weights.l2; - - if combined_score >= options.threshold { - results.push(SearchResult { - uri: l2_result.uri, - score: combined_score, - snippet: l2_result.snippet, - content: l2_result.content, - }); - } - } - } - - // 排序并截断 - results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); - results.truncate(options.limit); - - Ok(results) - } - - fn combine_l0_l1( - &self, - l0_results: &[crate::types::ScoredMemory], - l1_candidates: &[(String, f32)], - ) -> Vec<(String, f32, f32)> { - // 合并 L0 和 L1 结果 - Vec::new() - } - - async fn search_l2_for_dir( - &self, - query_vec: &[f32], - dir_uri: &str, - ) -> Result> { - // 在目录中搜索 L2 内容 - Ok(None) - } -} -``` - -### 2.4 集成到搜索引擎 - -```rust -// 文件: cortex-mem-core/src/search/vector_engine.rs (修改) - -impl VectorSearchEngine { - /// 智能搜索(使用动态权重) - pub async fn smart_search_with_dynamic_weights( - &self, - query: &str, - options: &SearchOptions, - intent: Option<&QueryIntent>, - ) -> Result> { - // 1. 分析意图 - let analyzed_intent = if let Some(i) = intent { - i.clone() - } else { - self.analyze_intent(query).await? - }; - - // 2. 选择权重策略 - let strategy = self.select_weight_strategy(); - - // 3. 创建动态权重执行器 - let executor = DynamicWeightSearchExecutor::new( - Arc::new(self.clone()), - self.embedding.clone(), - strategy, - ); - - // 4. 
执行检索 - executor.search_with_dynamic_weights(query, &analyzed_intent, options).await - } - - fn select_weight_strategy(&self) -> Box { - // 可以根据配置选择不同策略 - // 默认使用模板策略 - Box::new(TemplateWeightStrategy::new()) - } - - /// 更新检索反馈(用于自适应学习) - pub async fn record_search_feedback( - &self, - query: &str, - clicked_result_uri: &str, - rank: usize, - ) -> Result<()> { - // 记录反馈,用于后续学习 - Ok(()) - } -} -``` - -### 2.5 配置支持 - -```toml -# config.toml 新增配置 - -[search.weights] -# 权重策略: template, adaptive, hybrid -strategy = "template" - -# 默认权重(当没有匹配模板时使用) -default_l0 = 0.2 -default_l1 = 0.3 -default_l2 = 0.5 - -# 自适应学习配置 -[search.weights.adaptive] -enabled = false -learning_rate = 0.1 -min_samples_for_learning = 100 - -# 自定义权重模板 -[[search.weights.templates]] -name = "custom_entity" -query_type = "entity_lookup" -l0 = 0.1 -l1 = 0.15 -l2 = 0.75 -``` - -### 2.6 API 扩展 - -```rust -// REST API 新增端点 - -// 获取当前权重策略状态 -GET /api/v2/search/weights/status - -// 手动设置权重(测试用) -POST /api/v2/search/weights/set -{ - "l0": 0.3, - "l1": 0.3, - "l2": 0.4 -} - -// 记录检索反馈 -POST /api/v2/search/feedback -{ - "query": "用户的查询", - "clicked_uri": "cortex://user/preferences/xxx.md", - "rank": 1, - "satisfaction": 0.9 -} -``` - -## 3. 实现计划 - -| 步骤 | 任务 | 依赖 | -|------|------|------| -| 1 | 定义 WeightFeatures 和 LayerWeights 结构 | 无 | -| 2 | 实现 TemplateWeightStrategy | 步骤1 | -| 3 | 实现 AdaptiveWeightStrategy | 步骤1, 2 | -| 4 | 实现 DynamicWeightSearchExecutor | 步骤2 | -| 5 | 集成到 VectorSearchEngine | 步骤4 | -| 6 | 添加配置和 API 支持 | 全部 | - -## 4. 预期收益 - -| 场景 | 静态权重 | 动态权重 | 提升 | -|------|----------|----------|------| -| 实体查询召回 | 75% | 92% | +17pp | -| 语义查询召回 | 88% | 91% | +3pp | -| 关系查询召回 | 70% | 85% | +15pp | -| 综合召回精度 | 85% | 93% | +8pp | - -## 5. 
测试方案 - -### 5.1 单元测试 - -```rust -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_template_matching() { - let strategy = TemplateWeightStrategy::new(); - - // 测试实体查询 - let features = WeightFeatures { - query_type: QueryTypeFeature::EntityLookup, - ..Default::default() - }; - let weights = strategy.calculate(&features); - assert!(weights.l2 > weights.l0); - assert!(weights.l2 > weights.l1); - - // 测试搜索查询 - let features = WeightFeatures { - query_type: QueryTypeFeature::Search, - ..Default::default() - }; - let weights = strategy.calculate(&features); - assert!(weights.l0 >= weights.l2); - } - - #[test] - fn test_weight_normalization() { - let weights = LayerWeights { l0: 1.0, l1: 2.0, l2: 2.0 }; - let normalized = weights.normalize(); - assert!((normalized.l0 + normalized.l1 + normalized.l2 - 1.0).abs() < 0.001); - } -} -``` - -### 5.2 A/B 测试 - -```rust -// 在生产环境中进行 A/B 测试 -// 对照组:静态权重 -// 实验组:动态权重 -// 指标:召回率、用户满意度、检索延迟 -``` - -## 6. 与其他方案的协同 - -1. **与 Intent 分析协同**: - - Intent 分析提供 `query_type`,直接作为权重特征输入 - - 避免重复分析 - -2. **与遗忘机制协同**: - - 低强度记忆在 L2 权重中降权 - - 权重计算可考虑记忆强度 - -3. **与实体检索协同**: - - 实体查询自动触发高 L2 权重 - - 实体数量作为特征输入 diff --git a/litho.docs/en/1.Overview.md b/litho.docs/en/1.Overview.md index 8b7d903..202d449 100644 --- a/litho.docs/en/1.Overview.md +++ b/litho.docs/en/1.Overview.md @@ -104,7 +104,7 @@ The following components are **within the system boundary** and are developed, m | Component | Description | |---------|-------------| -| **cortex-mem-core** | The central library containing all core business logic: filesystem abstraction (`cortex://`), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation, extraction engine, search engine, and automation orchestrator. 
| +| **cortex-mem-core** | The central library containing all core business logic: filesystem abstraction (`cortex://`), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation, extraction engine, search engine, and automation orchestrator. v2.5 adds an event-driven incremental update system (`MemoryEventCoordinator`, `CascadeLayerUpdater`, `LlmResultCache`); v2.6 adds `MemoryCleanupService` based on the Ebbinghaus forgetting curve. | | **cortex-mem-cli** | Command-line interface for developers to interact with memory (search, list, get, delete, session, stats). | | **cortex-mem-service** | HTTP REST API server (Axum-based) exposing `/api/v2` endpoints for memory operations and tenant analytics. | | **cortex-mem-mcp** | Model Context Protocol server that exposes memory tools (`query_memory`, `store_memory`, etc.) via stdio for AI assistants. | diff --git a/litho.docs/en/2.Architecture.md b/litho.docs/en/2.Architecture.md index 92d331a..5bb2fb6 100644 --- a/litho.docs/en/2.Architecture.md +++ b/litho.docs/en/2.Architecture.md @@ -232,7 +232,7 @@ graph TD ### 3.2 Domain Module Division -The system comprises **12 distinct domains** organized by technical responsibility and business capability: +The system comprises **14 distinct domains** organized by technical responsibility and business capability: | Domain | Type | Responsibility | Key Components | |--------|------|----------------|----------------| @@ -244,6 +244,8 @@ The system comprises **12 distinct domains** organized by technical responsibili | **Search Engine** | Core Business | Semantic search with weighted scoring | VectorEngine, Intent Detection | | **Session Management** | Core Business | Conversation state, message tracking, timelines | SessionManager, Timeline, Message | | **Profile Management** | Core Business | User/agent profile persistence and merging | UserProfile, AgentProfile | +| **Incremental Update System** | Core Business | Event-driven incremental 
memory updates and cascading layer sync (v2.5) | MemoryEventCoordinator, CascadeLayerUpdater, LlmResultCache | +| **Memory Forgetting Mechanism** | Core Business | Low-strength memory cleanup based on Ebbinghaus forgetting curve (v2.6) | MemoryCleanupService, CleanupStats | | **Application Interface** | Application | Multi-protocol access layer (CLI, HTTP, MCP) | CLI Commands, Axum Handlers, MCP Service | | **Web UI** | Application | Visualization and management interface | Svelte Pages, Tenant Store, API Client | | **Tool Support** | Application | MCP tool definitions, Rig framework integration | Tool Definitions, Operation Wrappers | @@ -433,6 +435,7 @@ graph TB | **SessionManager** | Conversation lifecycle, message aggregation | Timeline ordering, participant tracking | | **ProfileManager** | Persistent user/agent knowledge bases | Category-based organization, importance scoring | | **MemoryIndexManager** | Memory index management, tracks memory update status | Session extraction records, index state management | +| **MemoryCleanupService** | Memory forgetting mechanism (v2.6), cleans low-strength memories based on Ebbinghaus curve | Periodic scanning, strength calculation, archive/delete strategies | #### 4.2.3 Infrastructure Components diff --git "a/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" "b/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" index 61b73b2..7e7de3a 100644 --- "a/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" +++ "b/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" @@ -104,7 +104,7 @@ Cortex-Mem在自身拥有什么和依赖什么之间定义了清晰的架构边 | 组件 | 描述 | |---------|-------------| -| **cortex-mem-core** | 包含所有核心业务逻辑的中心库:文件系统抽象(`cortex://`)、LLM客户端封装器、嵌入生成、Qdrant集成、会话管理、层级生成、提取引擎、搜索引擎和自动化编排器。 | +| **cortex-mem-core** | 包含所有核心业务逻辑的中心库:文件系统抽象(`cortex://`)、LLM客户端封装器、嵌入生成、Qdrant集成、会话管理、层级生成、提取引擎、搜索引擎和自动化编排器。v2.5 
新增事件驱动增量更新系统(`MemoryEventCoordinator`、`CascadeLayerUpdater`、`LlmResultCache`);v2.6 新增基于 Ebbinghaus 遗忘曲线的 `MemoryCleanupService`。 | | **cortex-mem-cli** | 用于开发者交互记忆的命令行界面(搜索、列表、获取、删除、会话、统计)。 | | **cortex-mem-service** | HTTP REST API服务器(基于Axum),暴露`/api/v2`端点用于记忆操作和租户分析。 | | **cortex-mem-mcp** | 模型上下文协议服务器,通过stdio向AI助手暴露记忆工具(`query_memory`、`store_memory`等)。 | diff --git "a/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" "b/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" index f304bb2..30dc1a5 100644 --- "a/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" +++ "b/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" @@ -232,7 +232,7 @@ graph TD ### 3.2 领域模块划分 -系统由**12个不同的领域**组成,按技术职责和业务能力组织: +系统由**14个不同的领域**组成,按技术职责和业务能力组织: | 领域 | 类型 | 职责 | 关键组件 | |--------|------|----------------|----------------| @@ -244,6 +244,8 @@ graph TD | **搜索引擎** | 核心业务 | 加权评分的语义搜索 | VectorEngine、意图检测 | | **会话管理** | 核心业务 | 对话状态、消息跟踪、时间线 | SessionManager、Timeline、Message | | **配置文件管理** | 核心业务 | 用户/智能体配置文件持久化和合并 | UserProfile、AgentProfile | +| **增量更新系统** | 核心业务 | 事件驱动的增量记忆更新和级联层级同步(v2.5) | MemoryEventCoordinator、CascadeLayerUpdater、LlmResultCache | +| **记忆遗忘机制** | 核心业务 | 基于Ebbinghaus遗忘曲线的低强度记忆清理(v2.6) | MemoryCleanupService、CleanupStats | | **应用接口** | 应用 | 多协议访问层(CLI、HTTP、MCP) | CLI命令、Axum处理器、MCP服务 | | **Web UI** | 应用 | 可视化和管理界面 | Svelte页面、租户Store、API客户端 | | **工具支持** | 应用 | MCP工具定义、Rig框架集成 | 工具定义、操作封装 | @@ -433,6 +435,7 @@ graph TB | **SessionManager** | 对话生命周期,消息聚合 | 时间线排序、参与者跟踪 | | **ProfileManager** | 持久的用户/智能体知识库 | 基于类别的组织、重要性评分 | | **MemoryIndexManager** | 记忆索引管理,跟踪记忆更新状态 | 会话提取记录、索引状态管理 | +| **MemoryCleanupService** | 记忆遗忘机制(v2.6),基于 Ebbinghaus 遗忘曲线清理低强度记忆 | 周期扫描、强度计算、归档/删除策略 | #### 4.2.3 基础设施组件 From b882eebc69597bd9bdec716102148e7dc07700a9 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Tue, 10 Mar 2026 20:31:06 +0800 Subject: [PATCH 12/20] comment optimize --- 
cortex-mem-core/src/automation/manager.rs | 2 +- cortex-mem-core/src/builder.rs | 16 ++++++++-------- cortex-mem-core/src/lib.rs | 5 ++--- cortex-mem-core/src/session/manager.rs | 6 +++--- cortex-mem-mcp/src/service.rs | 2 +- cortex-mem-service/src/handlers/sessions.rs | 2 +- cortex-mem-service/src/state.rs | 4 ++-- cortex-mem-tools/src/operations.rs | 10 +++++----- examples/cortex-mem-tars/src/app.rs | 1 - litho.docs/en/1.Overview.md | 2 +- litho.docs/en/2.Architecture.md | 2 +- ...1\271\347\233\256\346\246\202\350\277\260.md" | 2 +- ...6\266\346\236\204\346\246\202\350\247\210.md" | 4 ++-- 13 files changed, 28 insertions(+), 30 deletions(-) diff --git a/cortex-mem-core/src/automation/manager.rs b/cortex-mem-core/src/automation/manager.rs index 618a166..eece9a3 100644 --- a/cortex-mem-core/src/automation/manager.rs +++ b/cortex-mem-core/src/automation/manager.rs @@ -35,7 +35,7 @@ impl Default for AutomationConfig { /// 自动化管理器 /// -/// 职责(v2.5 重构后): +/// 职责 /// - 监听 `MessageAdded` 事件,将新消息内容(L2 级别)索引到向量数据库 /// /// 不再负责: diff --git a/cortex-mem-core/src/builder.rs b/cortex-mem-core/src/builder.rs index fceab10..c90dd7b 100644 --- a/cortex-mem-core/src/builder.rs +++ b/cortex-mem-core/src/builder.rs @@ -22,7 +22,7 @@ pub struct CortexMemBuilder { qdrant_config: Option, llm_client: Option>, session_config: SessionConfig, - /// v2.5: 事件协调器配置 + /// 事件协调器配置 coordinator_config: Option, } @@ -63,7 +63,7 @@ impl CortexMemBuilder { self } - /// v2.5: 配置事件协调器 + /// 配置事件协调器 pub fn with_coordinator_config(mut self, config: CoordinatorConfig) -> Self { self.coordinator_config = Some(config); self @@ -71,7 +71,7 @@ impl CortexMemBuilder { /// 🎯 构建完整的cortex-mem实例 pub async fn build(self) -> Result { - info!("Building Cortex Memory with v2.5 incremental update support"); + info!("Building Cortex Memory with incremental update support"); // 1. 
初始化文件系统 let filesystem = Arc::new(CortexFilesystem::new( @@ -113,7 +113,7 @@ impl CortexMemBuilder { let (event_bus, _old_event_rx) = EventBus::new(); let event_bus = Arc::new(event_bus); - // 5. v2.5: 创建 MemoryEventCoordinator(如果配置了所有必需组件) + // 5. 创建 MemoryEventCoordinator(如果配置了所有必需组件) let (coordinator_handle, memory_event_tx) = if let (Some(llm), Some(emb), Some(_vs)) = (&self.llm_client, &embedding, &vector_store) @@ -174,7 +174,7 @@ impl CortexMemBuilder { // 启动事件协调器 let handle = tokio::spawn(coordinator.start(rx)); - info!("✅ MemoryEventCoordinator started for v2.5 incremental updates"); + info!("✅ MemoryEventCoordinator started for incremental updates"); (Some(handle), Some(tx)) } else { @@ -182,9 +182,9 @@ impl CortexMemBuilder { (None, None) }; - // 6. 创建SessionManager(带 v2.5 memory_event_tx) + // 6. 创建SessionManager(带 memory_event_tx) let session_manager = if let Some(tx) = memory_event_tx { - // v2.5: 使用 MemoryEventCoordinator 的事件通道 + // 使用 MemoryEventCoordinator 的事件通道 if let Some(ref llm) = self.llm_client { SessionManager::with_llm_and_events( filesystem.clone(), @@ -242,7 +242,7 @@ pub struct CortexMem { pub llm_client: Option>, #[allow(dead_code)] event_bus: Arc, - /// v2.5: MemoryEventCoordinator 的后台任务句柄 + /// MemoryEventCoordinator 的后台任务句柄 coordinator_handle: Option>, } diff --git a/cortex-mem-core/src/lib.rs b/cortex-mem-core/src/lib.rs index 9a8684e..85b4aa4 100644 --- a/cortex-mem-core/src/lib.rs +++ b/cortex-mem-core/src/lib.rs @@ -68,7 +68,7 @@ pub mod search; pub mod session; pub mod vector_store; -// New modules for v2.5 incremental update system +// New modules for incremental update system pub mod memory_index; pub mod memory_events; pub mod memory_index_manager; @@ -103,8 +103,7 @@ pub use session::{ }; pub use vector_store::{QdrantVectorStore, VectorStore, parse_vector_id, uri_to_vector_id}; -// New re-exports for v2.5 -// MemoryType from memory_index is the primary type for v2.5 +// MemoryType from memory_index is the primary type for 
pub use memory_index::{ MemoryIndex, MemoryMetadata, MemoryScope, MemoryType, MemoryUpdateResult, SessionExtractionSummary, diff --git a/cortex-mem-core/src/session/manager.rs b/cortex-mem-core/src/session/manager.rs index 6435c6f..8656bfa 100644 --- a/cortex-mem-core/src/session/manager.rs +++ b/cortex-mem-core/src/session/manager.rs @@ -192,7 +192,7 @@ pub struct SessionManager { config: SessionConfig, llm_client: Option>, event_bus: Option, - /// Optional event sender for v2.5 incremental update system + /// Optional event sender for incremental update system memory_event_tx: Option>, } @@ -274,7 +274,7 @@ impl SessionManager { } } - /// Set the memory event sender for v2.5 incremental update system + /// Set the memory event sender for incremental update system pub fn with_memory_event_tx(mut self, tx: tokio::sync::mpsc::UnboundedSender) -> Self { self.memory_event_tx = Some(tx); self @@ -360,7 +360,7 @@ impl SessionManager { pub async fn close_session(&mut self, thread_id: &str) -> Result { let metadata = self.close_session_metadata_only(thread_id).await?; - // v2.5: fire-and-forget via channel + // fire-and-forget via channel if let Some(ref tx) = self.memory_event_tx { let user_id = metadata.user_id.clone().unwrap_or_else(|| "default".to_string()); let agent_id = metadata.agent_id.clone().unwrap_or_else(|| "default".to_string()); diff --git a/cortex-mem-mcp/src/service.rs b/cortex-mem-mcp/src/service.rs index 67ddeea..fff295e 100644 --- a/cortex-mem-mcp/src/service.rs +++ b/cortex-mem-mcp/src/service.rs @@ -834,7 +834,7 @@ impl ServerHandler for MemoryMcpService { instructions: Some( "Cortex Memory MCP Server - Provides memory management tools for AI assistants.\n\ \n\ - **Automatic Processing (v2.5):**\n\ + **Automatic Processing:**\n\ The server automatically triggers memory extraction and layer generation when:\n\ - Message count reaches threshold (default: 10 messages)\n\ - Session becomes inactive (default: 2 minutes without new messages)\n\ diff --git 
a/cortex-mem-service/src/handlers/sessions.rs b/cortex-mem-service/src/handlers/sessions.rs index b62cdfb..10ab9d6 100644 --- a/cortex-mem-service/src/handlers/sessions.rs +++ b/cortex-mem-service/src/handlers/sessions.rs @@ -123,7 +123,7 @@ pub async fn add_message( )), }; - // v2.5: Use SessionManager::add_message to trigger MemoryEventCoordinator events + // Use SessionManager::add_message to trigger MemoryEventCoordinator events // This ensures proper event chain for automatic indexing and layer generation let session_mgr = state.session_manager.read().await; let message = session_mgr.add_message(&thread_id, role, payload.content).await?; diff --git a/cortex-mem-service/src/state.rs b/cortex-mem-service/src/state.rs index aa0abce..e801aea 100644 --- a/cortex-mem-service/src/state.rs +++ b/cortex-mem-service/src/state.rs @@ -60,13 +60,13 @@ impl AppState { builder = builder.with_qdrant(qdrant_cfg); } - // v2.5: 使用 MemoryEventCoordinator 进行记忆提取和层级更新 + // 使用 MemoryEventCoordinator 进行记忆提取和层级更新 // 配置协调器(可选,使用默认配置即可) // builder = builder.with_coordinator_config(CoordinatorConfig::default()); // 构建Cortex Memory let cortex = builder.build().await?; - tracing::info!("✅ Cortex Memory initialized with v2.5 MemoryEventCoordinator"); + tracing::info!("✅ Cortex Memory initialized with MemoryEventCoordinator"); // 从Cortex Memory获取组件 let filesystem = cortex.filesystem(); diff --git a/cortex-mem-tools/src/operations.rs b/cortex-mem-tools/src/operations.rs index f875253..dbc77e0 100644 --- a/cortex-mem-tools/src/operations.rs +++ b/cortex-mem-tools/src/operations.rs @@ -41,11 +41,11 @@ pub struct MemoryOperations { pub(crate) default_user_id: String, pub(crate) default_agent_id: String, - /// v2.5: 事件发送器,用于异步触发层级生成 + /// 事件发送器,用于异步触发层级生成 pub(crate) memory_event_tx: Option>, - /// v2.5: 事件协调器引用,用于同步等待后台处理完成 + /// 事件协调器引用,用于同步等待后台处理完成 pub(crate) event_coordinator: Option>, } @@ -150,7 +150,7 @@ impl MemoryOperations { let embedding_client = 
Arc::new(EmbeddingClient::new(embedding_config)?); tracing::info!("Embedding client initialized"); - // v2.5: Create MemoryEventCoordinator BEFORE SessionManager + // Create MemoryEventCoordinator BEFORE SessionManager let (coordinator, memory_event_tx, event_rx) = cortex_mem_core::MemoryEventCoordinator::new( filesystem.clone(), llm_client.clone(), @@ -163,10 +163,10 @@ impl MemoryOperations { // Start the coordinator event loop in background tokio::spawn(coordinator.start(event_rx)); - tracing::info!("MemoryEventCoordinator started for v2.5 incremental updates"); + tracing::info!("MemoryEventCoordinator started for incremental updates"); let config = SessionConfig::default(); - // Create SessionManager with memory_event_tx for v2.5 integration + // Create SessionManager with memory_event_tx for integration let session_manager = SessionManager::with_llm_and_events( filesystem.clone(), config, diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index 462cfa8..58c0a0d 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -18,7 +18,6 @@ use std::io; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::sync::mpsc; -// use cortex_mem_core::automation::{AutoExtractor, AutoExtractConfig}; // removed in v2.5 // 音频相关导入 use crate::audio_input; diff --git a/litho.docs/en/1.Overview.md b/litho.docs/en/1.Overview.md index 202d449..a06daa3 100644 --- a/litho.docs/en/1.Overview.md +++ b/litho.docs/en/1.Overview.md @@ -104,7 +104,7 @@ The following components are **within the system boundary** and are developed, m | Component | Description | |---------|-------------| -| **cortex-mem-core** | The central library containing all core business logic: filesystem abstraction (`cortex://`), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation, extraction engine, search engine, and automation orchestrator. 
v2.5 adds an event-driven incremental update system (`MemoryEventCoordinator`, `CascadeLayerUpdater`, `LlmResultCache`); v2.6 adds `MemoryCleanupService` based on the Ebbinghaus forgetting curve. | +| **cortex-mem-core** | The central library containing all core business logic: filesystem abstraction (`cortex://`), LLM client wrappers, embedding generation, Qdrant integration, session management, layer generation, extraction engine, search engine, and automation orchestrator. Adds an event-driven incremental update system (`MemoryEventCoordinator`, `CascadeLayerUpdater`, `LlmResultCache`); v2.6 adds `MemoryCleanupService` based on the Ebbinghaus forgetting curve. | | **cortex-mem-cli** | Command-line interface for developers to interact with memory (search, list, get, delete, session, stats). | | **cortex-mem-service** | HTTP REST API server (Axum-based) exposing `/api/v2` endpoints for memory operations and tenant analytics. | | **cortex-mem-mcp** | Model Context Protocol server that exposes memory tools (`query_memory`, `store_memory`, etc.) via stdio for AI assistants. 
| diff --git a/litho.docs/en/2.Architecture.md b/litho.docs/en/2.Architecture.md index 5bb2fb6..8da4e80 100644 --- a/litho.docs/en/2.Architecture.md +++ b/litho.docs/en/2.Architecture.md @@ -244,7 +244,7 @@ The system comprises **14 distinct domains** organized by technical responsibili | **Search Engine** | Core Business | Semantic search with weighted scoring | VectorEngine, Intent Detection | | **Session Management** | Core Business | Conversation state, message tracking, timelines | SessionManager, Timeline, Message | | **Profile Management** | Core Business | User/agent profile persistence and merging | UserProfile, AgentProfile | -| **Incremental Update System** | Core Business | Event-driven incremental memory updates and cascading layer sync (v2.5) | MemoryEventCoordinator, CascadeLayerUpdater, LlmResultCache | +| **Incremental Update System** | Core Business | Event-driven incremental memory updates and cascading layer sync | MemoryEventCoordinator, CascadeLayerUpdater, LlmResultCache | | **Memory Forgetting Mechanism** | Core Business | Low-strength memory cleanup based on Ebbinghaus forgetting curve (v2.6) | MemoryCleanupService, CleanupStats | | **Application Interface** | Application | Multi-protocol access layer (CLI, HTTP, MCP) | CLI Commands, Axum Handlers, MCP Service | | **Web UI** | Application | Visualization and management interface | Svelte Pages, Tenant Store, API Client | diff --git "a/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" "b/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" index 7e7de3a..fcecc1c 100644 --- "a/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" +++ "b/litho.docs/zh/1\343\200\201\351\241\271\347\233\256\346\246\202\350\277\260.md" @@ -104,7 +104,7 @@ Cortex-Mem在自身拥有什么和依赖什么之间定义了清晰的架构边 | 组件 | 描述 | |---------|-------------| -| **cortex-mem-core** | 
包含所有核心业务逻辑的中心库:文件系统抽象(`cortex://`)、LLM客户端封装器、嵌入生成、Qdrant集成、会话管理、层级生成、提取引擎、搜索引擎和自动化编排器。v2.5 新增事件驱动增量更新系统(`MemoryEventCoordinator`、`CascadeLayerUpdater`、`LlmResultCache`);v2.6 新增基于 Ebbinghaus 遗忘曲线的 `MemoryCleanupService`。 | +| **cortex-mem-core** | 包含所有核心业务逻辑的中心库:文件系统抽象(`cortex://`)、LLM客户端封装器、嵌入生成、Qdrant集成、会话管理、层级生成、提取引擎、搜索引擎和自动化编排器。新增事件驱动增量更新系统(`MemoryEventCoordinator`、`CascadeLayerUpdater`、`LlmResultCache`);v2.6 新增基于 Ebbinghaus 遗忘曲线的 `MemoryCleanupService`。 | | **cortex-mem-cli** | 用于开发者交互记忆的命令行界面(搜索、列表、获取、删除、会话、统计)。 | | **cortex-mem-service** | HTTP REST API服务器(基于Axum),暴露`/api/v2`端点用于记忆操作和租户分析。 | | **cortex-mem-mcp** | 模型上下文协议服务器,通过stdio向AI助手暴露记忆工具(`query_memory`、`store_memory`等)。 | diff --git "a/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" "b/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" index 30dc1a5..5ea2b2c 100644 --- "a/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" +++ "b/litho.docs/zh/2\343\200\201\346\236\266\346\236\204\346\246\202\350\247\210.md" @@ -244,8 +244,8 @@ graph TD | **搜索引擎** | 核心业务 | 加权评分的语义搜索 | VectorEngine、意图检测 | | **会话管理** | 核心业务 | 对话状态、消息跟踪、时间线 | SessionManager、Timeline、Message | | **配置文件管理** | 核心业务 | 用户/智能体配置文件持久化和合并 | UserProfile、AgentProfile | -| **增量更新系统** | 核心业务 | 事件驱动的增量记忆更新和级联层级同步(v2.5) | MemoryEventCoordinator、CascadeLayerUpdater、LlmResultCache | -| **记忆遗忘机制** | 核心业务 | 基于Ebbinghaus遗忘曲线的低强度记忆清理(v2.6) | MemoryCleanupService、CleanupStats | +| **增量更新系统** | 核心业务 | 事件驱动的增量记忆更新和级联层级同步 | MemoryEventCoordinator、CascadeLayerUpdater、LlmResultCache | +| **记忆遗忘机制** | 核心业务 | 基于Ebbinghaus遗忘曲线的低强度记忆清理 | MemoryCleanupService、CleanupStats | | **应用接口** | 应用 | 多协议访问层(CLI、HTTP、MCP) | CLI命令、Axum处理器、MCP服务 | | **Web UI** | 应用 | 可视化和管理界面 | Svelte页面、租户Store、API客户端 | | **工具支持** | 应用 | MCP工具定义、Rig框架集成 | 工具定义、操作封装 | From 14b6d0635b99f33e58756d71f5b3c0b7f83e1a98 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 10:02:06 +0800 Subject: [PATCH 13/20] 
Add OpenClaw plugin for Cortex Memory with L0/L1/L2 semantic search --- examples/cortex-mem-openclaw/README.md | 186 +++++++++++++ .../cortex-mem-openclaw/openclaw.plugin.json | 62 +++++ examples/cortex-mem-openclaw/package.json | 41 +++ examples/cortex-mem-openclaw/src/client.ts | 206 ++++++++++++++ examples/cortex-mem-openclaw/src/index.ts | 254 ++++++++++++++++++ examples/cortex-mem-openclaw/src/tools.ts | 180 +++++++++++++ examples/cortex-mem-openclaw/tsconfig.json | 20 ++ 7 files changed, 949 insertions(+) create mode 100644 examples/cortex-mem-openclaw/README.md create mode 100644 examples/cortex-mem-openclaw/openclaw.plugin.json create mode 100644 examples/cortex-mem-openclaw/package.json create mode 100644 examples/cortex-mem-openclaw/src/client.ts create mode 100644 examples/cortex-mem-openclaw/src/index.ts create mode 100644 examples/cortex-mem-openclaw/src/tools.ts create mode 100644 examples/cortex-mem-openclaw/tsconfig.json diff --git a/examples/cortex-mem-openclaw/README.md b/examples/cortex-mem-openclaw/README.md new file mode 100644 index 0000000..267aacb --- /dev/null +++ b/examples/cortex-mem-openclaw/README.md @@ -0,0 +1,186 @@ +# Cortex Memory Plugin for OpenClaw + +为 OpenClaw 提供层级语义记忆能力的插件,支持 L0/L1/L2 三层检索。 + +## 功能特性 + +- **层级语义搜索**:L0(摘要)→ L1(概览)→ L2(完整内容)逐层检索 +- **自动向量化**:消息自动生成向量嵌入,支持语义相似度搜索 +- **会话隔离**:支持多会话独立记忆空间 +- **HTTP API**:通过 cortex-mem-service REST API 与核心服务通信 + +## 前置条件 + +1. **cortex-mem-service** 运行中 + ```bash + # 在 cortex-mem 项目根目录 + cargo run -p cortex-mem-service -- --data-dir ./cortex-data + ``` + +2. **Qdrant 向量数据库**(可选,用于向量搜索) +3. 
**Embedding 服务**(OpenAI 兼容 API) + +## 安装 + +### 方式一:本地链接安装(开发模式) + +```bash +# 在 cortex-mem 项目根目录 +cd examples/cortex-mem-openclaw + +# 安装依赖并构建 +npm install +npm run build + +# 链接到 OpenClaw +openclaw plugins install --link $(pwd) +``` + +### 方式二:npm 发布后安装 + +```bash +openclaw plugins install @cortex-mem/openclaw-plugin +``` + +## 配置 + +在 OpenClaw 配置文件(`~/.openclaw/openclaw.json` 或项目目录)中添加: + +```json +{ + "plugins": { + "entries": { + "cortex-mem": { + "enabled": true, + "config": { + "serviceUrl": "http://127.0.0.1:8085", + "defaultSessionId": "default", + "searchLimit": 10, + "minScore": 0.6 + } + } + } + } +} +``` + +### 配置说明 + +| 参数 | 类型 | 默认值 | 说明 | +|------|------|--------|------| +| `serviceUrl` | string | `http://127.0.0.1:8085` | cortex-mem-service 的 HTTP 端点 | +| `defaultSessionId` | string | `default` | 默认会话 ID | +| `searchLimit` | integer | 10 | 搜索结果最大数量 | +| `minScore` | number | 0.6 | 最小相关性分数阈值 | + +## 工具说明 + +### cortex_search + +层级语义搜索,返回相关记忆片段。 + +```json +{ + "query": "用户偏好设置", + "scope": "session-123", // 可选,限定搜索范围 + "limit": 10, + "min_score": 0.6 +} +``` + +返回: +```json +{ + "results": [ + { + "uri": "cortex://session/abc/timeline/2026-03/11/10_30_00_xxx.md", + "score": 0.89, + "snippet": "用户偏好深色主题..." 
+ } + ], + "total": 1 +} +``` + +### cortex_recall + +分层召回记忆,支持指定返回层级。 + +```json +{ + "query": "项目架构决策", + "layers": ["L0", "L1"], // L0=摘要, L1=概览, L2=完整内容 + "scope": "session-123", + "limit": 5 +} +``` + +### cortex_add_memory + +向指定会话添加记忆。 + +```json +{ + "content": "用户选择了 PostgreSQL 作为主数据库", + "role": "assistant", // user/assistant/system + "session_id": "session-123" // 可选,默认使用 defaultSessionId +} +``` + +### cortex_list_sessions + +列出所有记忆会话。 + +```json +{} +``` + +返回: +```json +{ + "sessions": [ + { + "thread_id": "session-123", + "status": "active", + "message_count": 42, + "created_at": "2026-03-11T10:00:00Z" + } + ] +} +``` + +## 架构 + +``` +OpenClaw Gateway + │ + ▼ +cortex-mem-plugin (TypeScript) + │ + ▼ HTTP REST +cortex-mem-service (Rust) + │ + ├─► Qdrant (向量存储) + ├─► CortexFilesystem (文件存储) + └─► LLM (层级生成) +``` + +## 开发 + +```bash +# 安装依赖 +npm install + +# 开发模式(监视编译) +npm run dev + +# 构建 +npm run build + +# 运行测试 +npm test +``` + +## 许可证 + +MIT diff --git a/examples/cortex-mem-openclaw/openclaw.plugin.json b/examples/cortex-mem-openclaw/openclaw.plugin.json new file mode 100644 index 0000000..a37f958 --- /dev/null +++ b/examples/cortex-mem-openclaw/openclaw.plugin.json @@ -0,0 +1,62 @@ +{ + "id": "cortex-mem", + "name": "Cortex Memory", + "version": "0.1.0", + "description": "Layered semantic memory with L0/L1/L2 tiered retrieval for OpenClaw agents", + "kind": "memory", + "entry": "dist/index.js", + "configSchema": { + "type": "object", + "properties": { + "serviceUrl": { + "type": "string", + "description": "Cortex Memory service URL", + "default": "http://127.0.0.1:8085" + }, + "defaultSessionId": { + "type": "string", + "description": "Default session ID for memory operations (defaults to 'default')", + "default": "default" + }, + "searchLimit": { + "type": "integer", + "description": "Default number of search results", + "default": 10, + "minimum": 1, + "maximum": 50 + }, + "minScore": { + "type": "number", + "description": "Minimum relevance score 
for search results", + "default": 0.6, + "minimum": 0, + "maximum": 1 + } + }, + "required": ["serviceUrl"] + }, + "uiHints": { + "serviceUrl": { + "label": "Service URL", + "description": "The HTTP endpoint of your cortex-mem-service instance" + }, + "defaultSessionId": { + "label": "Default Session", + "description": "Session ID to use when no thread is specified" + }, + "searchLimit": { + "label": "Search Limit", + "description": "Maximum number of results to return from search" + }, + "minScore": { + "label": "Minimum Score", + "description": "Filter results below this relevance threshold" + } + }, + "tools": [ + "cortex_search", + "cortex_recall", + "cortex_add_memory", + "cortex_list_sessions" + ] +} diff --git a/examples/cortex-mem-openclaw/package.json b/examples/cortex-mem-openclaw/package.json new file mode 100644 index 0000000..0c64a07 --- /dev/null +++ b/examples/cortex-mem-openclaw/package.json @@ -0,0 +1,41 @@ +{ + "name": "@cortex-mem/openclaw-plugin", + "version": "0.1.0", + "description": "Cortex Memory plugin for OpenClaw - Layered semantic memory with L0/L1/L2 tiered retrieval", + "main": "dist/index.js", + "types": "dist/index.d.ts", + "scripts": { + "build": "tsc", + "dev": "tsc --watch", + "lint": "eslint src/**/*.ts", + "test": "vitest" + }, + "keywords": [ + "openclaw", + "memory", + "semantic-search", + "vector-search", + "ai", + "agent" + ], + "author": "Cortex-Mem Contributors", + "license": "MIT", + "openclaw": { + "extensions": [ + "dist/index.js" + ] + }, + "devDependencies": { + "@types/node": "^22.0.0", + "typescript": "^5.7.0", + "vitest": "^2.0.0" + }, + "engines": { + "node": ">=22.0.0" + }, + "files": [ + "dist/", + "openclaw.plugin.json", + "README.md" + ] +} diff --git a/examples/cortex-mem-openclaw/src/client.ts b/examples/cortex-mem-openclaw/src/client.ts new file mode 100644 index 0000000..17215cf --- /dev/null +++ b/examples/cortex-mem-openclaw/src/client.ts @@ -0,0 +1,206 @@ +/** + * Cortex Memory API Client + * + * HTTP 
client for cortex-mem-service REST API + */ + +// Response types +interface ApiResponse { + success: boolean; + data?: T; + error?: string; + timestamp: string; +} + +// Search types +export interface SearchRequest { + query: string; + thread?: string; + limit?: number; + min_score?: number; +} + +export interface SearchResult { + uri: string; + score: number; + snippet: string; + content?: string; + source: string; +} + +// Session types +export interface SessionResponse { + thread_id: string; + status: string; + message_count: number; + created_at: string; + updated_at: string; +} + +export interface CreateSessionRequest { + thread_id?: string; + title?: string; + user_id?: string; + agent_id?: string; +} + +export interface AddMessageRequest { + role: 'user' | 'assistant' | 'system'; + content: string; +} + +// Layer types +export type ContextLayer = 'L0' | 'L1' | 'L2'; + +export interface LayeredRecallResult { + uri: string; + score: number; + abstract?: string; // L0: ~100 tokens + overview?: string; // L1: ~2000 tokens + content?: string; // L2: full content +} + +/** + * Cortex Memory API Client + */ +export class CortexMemClient { + private baseUrl: string; + + constructor(baseUrl: string = 'http://127.0.0.1:8085') { + this.baseUrl = baseUrl.replace(/\/$/, ''); + } + + /** + * Layered semantic search (L0 -> L1 -> L2 tiered retrieval) + */ + async search(request: SearchRequest): Promise { + const response = await this.post('/api/v2/search', request); + return response; + } + + /** + * Quick search returning only L0 abstracts + */ + async find(query: string, scope?: string, limit: number = 5): Promise { + return this.search({ + query, + thread: scope, + limit, + min_score: 0.5, + }); + } + + /** + * Layered recall with specified detail level + * + * @param query - Search query + * @param layers - Which layers to return (L0, L1, L2) + * @param scope - Optional session/thread scope + * @param limit - Maximum results + */ + async recall( + query: string, + 
layers: ContextLayer[] = ['L0'], + scope?: string, + limit: number = 10 + ): Promise { + // First do search to get URIs + const searchResults = await this.search({ + query, + thread: scope, + limit, + }); + + // For now, return search results with snippets + // In a full implementation, we would make additional calls + // to get L1 overview and L2 content based on requested layers + return searchResults.map(result => ({ + uri: result.uri, + score: result.score, + abstract: result.snippet, // L0 from snippet + overview: undefined, // Would need additional API call + content: result.content, // L2 if available + })); + } + + /** + * List all sessions + */ + async listSessions(): Promise { + const response = await this.get('/api/v2/sessions'); + return response; + } + + /** + * Create a new session + */ + async createSession(request: CreateSessionRequest = {}): Promise { + const response = await this.post('/api/v2/sessions', request); + return response; + } + + /** + * Add a message to a session + */ + async addMessage(threadId: string, message: AddMessageRequest): Promise { + const response = await this.post( + `/api/v2/sessions/${threadId}/messages`, + message + ); + return response; + } + + /** + * Close a session + */ + async closeSession(threadId: string): Promise { + const response = await this.post( + `/api/v2/sessions/${threadId}/close`, + {} + ); + return response; + } + + /** + * Health check + */ + async healthCheck(): Promise { + try { + const response = await fetch(`${this.baseUrl}/health`); + return response.ok; + } catch { + return false; + } + } + + // Private helpers + private async get(path: string): Promise { + const response = await fetch(`${this.baseUrl}${path}`); + if (!response.ok) { + throw new Error(`API error: ${response.status} ${response.statusText}`); + } + const data: ApiResponse = await response.json(); + if (!data.success) { + throw new Error(data.error || 'API request failed'); + } + return data.data!; + } + + private async post(path: 
string, body: object): Promise { + const response = await fetch(`${this.baseUrl}${path}`, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(body), + }); + if (!response.ok) { + throw new Error(`API error: ${response.status} ${response.statusText}`); + } + const data: ApiResponse = await response.json(); + if (!data.success) { + throw new Error(data.error || 'API request failed'); + } + return data.data!; + } +} diff --git a/examples/cortex-mem-openclaw/src/index.ts b/examples/cortex-mem-openclaw/src/index.ts new file mode 100644 index 0000000..90aa498 --- /dev/null +++ b/examples/cortex-mem-openclaw/src/index.ts @@ -0,0 +1,254 @@ +/** + * Cortex Memory Plugin for OpenClaw + * + * Provides layered semantic memory with L0/L1/L2 tiered retrieval. + * + * Installation: + * openclaw plugins install @cortex-mem/openclaw-plugin + * + * Configuration (in openclaw.json): + * { + * "plugins": { + * "entries": { + * "cortex-mem": { + * "enabled": true, + * "config": { + * "serviceUrl": "http://127.0.0.1:8085", + * "defaultSessionId": "default", + * "searchLimit": 10, + * "minScore": 0.6 + * } + * } + * } + * } + * } + */ + +import { CortexMemClient } from './client.js'; +import { toolSchemas, type CortexSearchInput, type CortexRecallInput, type CortexAddMemoryInput } from './tools.js'; + +// Plugin configuration +interface PluginConfig { + serviceUrl: string; + defaultSessionId: string; + searchLimit: number; + minScore: number; +} + +// OpenClaw Plugin API types (minimal definitions) +interface PluginAPI { + getConfig(): PluginConfig; + registerTool(tool: ToolDefinition): void; + logger: { + info: (msg: string, ...args: unknown[]) => void; + warn: (msg: string, ...args: unknown[]) => void; + error: (msg: string, ...args: unknown[]) => void; + }; +} + +interface ToolDefinition { + name: string; + description: string; + inputSchema: object; + handler: (args: Record) => Promise; +} + +interface ToolResult { + content?: string; + 
error?: string; + [key: string]: unknown; +} + +// Export plugin +export default function cortexMemPlugin(api: PluginAPI) { + const config = api.getConfig(); + const client = new CortexMemClient(config.serviceUrl); + + api.logger.info('Cortex Memory plugin initializing...'); + api.logger.info(`Service URL: ${config.serviceUrl}`); + + // Register cortex_search tool + api.registerTool({ + name: toolSchemas.cortex_search.name, + description: toolSchemas.cortex_search.description, + inputSchema: toolSchemas.cortex_search.inputSchema, + handler: async (args: Record) => { + const input = args as CortexSearchInput; + + try { + const results = await client.search({ + query: input.query, + thread: input.scope, + limit: input.limit ?? config.searchLimit, + min_score: input.min_score ?? config.minScore, + }); + + const formattedResults = results + .map((r, i) => `${i + 1}. [Score: ${r.score.toFixed(2)}] ${r.snippet}\n URI: ${r.uri}`) + .join('\n\n'); + + return { + content: `Found ${results.length} results for "${input.query}":\n\n${formattedResults}`, + results: results.map(r => ({ + uri: r.uri, + score: r.score, + snippet: r.snippet, + })), + total: results.length, + }; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + api.logger.error(`cortex_search failed: ${message}`); + return { error: `Search failed: ${message}` }; + } + }, + }); + + // Register cortex_recall tool + api.registerTool({ + name: toolSchemas.cortex_recall.name, + description: toolSchemas.cortex_recall.description, + inputSchema: toolSchemas.cortex_recall.inputSchema, + handler: async (args: Record) => { + const input = args as CortexRecallInput; + + try { + const results = await client.recall( + input.query, + input.layers ?? ['L0'], + input.scope, + input.limit ?? 5 + ); + + const layerLabels: Record = { + L0: 'Abstract', + L1: 'Overview', + L2: 'Full Content', + }; + + const requestedLayers = input.layers ?? 
['L0']; + + const formattedResults = results + .map((r, i) => { + let content = `${i + 1}. [Score: ${r.score.toFixed(2)}] URI: ${r.uri}\n`; + + if (requestedLayers.includes('L0') && r.abstract) { + content += ` [${layerLabels['L0']}]: ${r.abstract}\n`; + } + if (requestedLayers.includes('L1') && r.overview) { + content += ` [${layerLabels['L1']}]: ${r.overview.substring(0, 500)}...\n`; + } + if (requestedLayers.includes('L2') && r.content) { + content += ` [${layerLabels['L2']}]: ${r.content.substring(0, 500)}...\n`; + } + + return content; + }) + .join('\n'); + + return { + content: `Recalled ${results.length} memories:\n\n${formattedResults}`, + results, + total: results.length, + }; + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + api.logger.error(`cortex_recall failed: ${message}`); + return { error: `Recall failed: ${message}` }; + } + }, + }); + + // Register cortex_add_memory tool + api.registerTool({ + name: toolSchemas.cortex_add_memory.name, + description: toolSchemas.cortex_add_memory.description, + inputSchema: toolSchemas.cortex_add_memory.inputSchema, + handler: async (args: Record) => { + const input = args as CortexAddMemoryInput; + + try { + const sessionId = input.session_id ?? config.defaultSessionId; + const messageUri = await client.addMessage(sessionId, { + role: input.role ?? 'user', + content: input.content, + }); + + return { + content: `Memory stored successfully in session "${sessionId}".\nURI: ${messageUri}`, + success: true, + message_uri: messageUri, + }; + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + api.logger.error(`cortex_add_memory failed: ${message}`); + return { error: `Failed to add memory: ${message}` }; + } + }, + }); + + // Register cortex_list_sessions tool + api.registerTool({ + name: toolSchemas.cortex_list_sessions.name, + description: toolSchemas.cortex_list_sessions.description, + inputSchema: toolSchemas.cortex_list_sessions.inputSchema, + handler: async () => { + try { + const sessions = await client.listSessions(); + + if (sessions.length === 0) { + return { content: 'No sessions found.' }; + } + + const formattedSessions = sessions + .map((s, i) => { + const created = new Date(s.created_at).toLocaleDateString(); + return `${i + 1}. ${s.thread_id} (${s.status}, ${s.message_count} messages, created ${created})`; + }) + .join('\n'); + + return { + content: `Found ${sessions.length} sessions:\n\n${formattedSessions}`, + sessions: sessions.map(s => ({ + thread_id: s.thread_id, + status: s.status, + message_count: s.message_count, + created_at: s.created_at, + })), + }; + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + api.logger.error(`cortex_list_sessions failed: ${message}`); + return { error: `Failed to list sessions: ${message}` }; + } + }, + }); + + api.logger.info('Cortex Memory plugin initialized successfully'); + + return { + id: 'cortex-mem', + name: 'Cortex Memory', + version: '0.1.0', + }; +} + +// Also support object export style +export const plugin = { + id: 'cortex-mem', + name: 'Cortex Memory', + version: '0.1.0', + configSchema: { + type: 'object', + properties: { + serviceUrl: { type: 'string', default: 'http://127.0.0.1:8085' }, + defaultSessionId: { type: 'string', default: 'default' }, + searchLimit: { type: 'integer', default: 10 }, + minScore: { type: 'number', default: 0.6 }, + }, + required: ['serviceUrl'], + }, + register(api: PluginAPI) { + return cortexMemPlugin(api); + }, +}; diff --git a/examples/cortex-mem-openclaw/src/tools.ts b/examples/cortex-mem-openclaw/src/tools.ts new file mode 100644 index 0000000..8669326 --- /dev/null +++ b/examples/cortex-mem-openclaw/src/tools.ts @@ -0,0 +1,180 @@ +/** + * Tool definitions for Cortex Memory plugin + */ + +import type { ContextLayer } from './client.js'; + +// Tool input schemas (JSON Schema format) +export const toolSchemas = { + cortex_search: { + name: 'cortex_search', + description: `Layered semantic search across memory using L0/L1/L2 tiered retrieval. +Returns relevant memories ranked by relevance score. 
+ +Use this tool when you need to: +- Find past conversations or decisions +- Search for specific information across all sessions +- Discover related memories by semantic similarity`, + inputSchema: { + type: 'object', + properties: { + query: { + type: 'string', + description: 'The search query - can be natural language or keywords', + }, + scope: { + type: 'string', + description: 'Optional session/thread ID to limit search scope', + }, + limit: { + type: 'integer', + description: 'Maximum number of results to return (default: 10)', + default: 10, + }, + min_score: { + type: 'number', + description: 'Minimum relevance score threshold (0-1, default: 0.6)', + default: 0.6, + }, + }, + required: ['query'], + }, + }, + + cortex_recall: { + name: 'cortex_recall', + description: `Recall memories with layered detail levels (L0/L1/L2). + +L0 (Abstract): ~100 tokens - Quick summary for relevance assessment +L1 (Overview): ~2000 tokens - Key points and context +L2 (Full): Complete content - Use only when you need full details + +Use this when you need more context than what cortex_search provides.`, + inputSchema: { + type: 'object', + properties: { + query: { + type: 'string', + description: 'The search query', + }, + layers: { + type: 'array', + items: { + type: 'string', + enum: ['L0', 'L1', 'L2'], + }, + description: 'Which detail layers to return (default: ["L0"])', + default: ['L0'], + }, + scope: { + type: 'string', + description: 'Optional session/thread ID to limit search scope', + }, + limit: { + type: 'integer', + description: 'Maximum number of results (default: 5)', + default: 5, + }, + }, + required: ['query'], + }, + }, + + cortex_add_memory: { + name: 'cortex_add_memory', + description: `Add a message to memory for a specific session. 
+This stores the message and automatically triggers: +- Vector embedding for semantic search +- L0/L1 layer generation (async) + +Use this to persist important information that should be searchable later.`, + inputSchema: { + type: 'object', + properties: { + content: { + type: 'string', + description: 'The content to store in memory', + }, + role: { + type: 'string', + enum: ['user', 'assistant', 'system'], + description: 'Role of the message sender (default: user)', + default: 'user', + }, + session_id: { + type: 'string', + description: 'Session/thread ID (uses default if not specified)', + }, + }, + required: ['content'], + }, + }, + + cortex_list_sessions: { + name: 'cortex_list_sessions', + description: `List all memory sessions with their status. +Shows session IDs, message counts, and creation/update times.`, + inputSchema: { + type: 'object', + properties: {}, + }, + }, +} as const; + +// Tool input types (inferred from schemas) +export interface CortexSearchInput { + query: string; + scope?: string; + limit?: number; + min_score?: number; +} + +export interface CortexRecallInput { + query: string; + layers?: ContextLayer[]; + scope?: string; + limit?: number; +} + +export interface CortexAddMemoryInput { + content: string; + role?: 'user' | 'assistant' | 'system'; + session_id?: string; +} + +export interface CortexListSessionsInput {} + +// Tool output types +export interface CortexSearchOutput { + results: Array<{ + uri: string; + score: number; + snippet: string; + }>; + total: number; +} + +export interface CortexRecallOutput { + results: Array<{ + uri: string; + score: number; + abstract?: string; + overview?: string; + content?: string; + }>; + total: number; +} + +export interface CortexAddMemoryOutput { + success: boolean; + message_uri: string; +} + +export interface CortexListSessionsOutput { + sessions: Array<{ + thread_id: string; + status: string; + message_count: number; + created_at: string; + }>; +} diff --git 
a/examples/cortex-mem-openclaw/tsconfig.json b/examples/cortex-mem-openclaw/tsconfig.json new file mode 100644 index 0000000..b37b6b9 --- /dev/null +++ b/examples/cortex-mem-openclaw/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2022"], + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "resolveJsonModule": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} From df570451cf2bc020a045742ce5c3787ca846d0cf Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 12:00:34 +0800 Subject: [PATCH 14/20] Refactor event system and improve vector store handling Introduce MemoryEventCoordinator for unified event processing and ensure stable content hashing with SHA-256. Enhance TARS UI with better error notifications. 
--- cortex-mem-core/src/automation/manager.rs | 51 +- cortex-mem-core/src/builder.rs | 103 +- .../src/incremental_memory_updater.rs | 1270 +++++------------ cortex-mem-core/src/lib.rs | 2 +- cortex-mem-core/src/memory_cleanup.rs | 42 +- cortex-mem-core/src/memory_index_manager.rs | 13 +- cortex-mem-core/src/search/vector_engine.rs | 175 ++- cortex-mem-service/src/handlers/sessions.rs | 14 + cortex-mem-service/src/state.rs | 86 +- cortex-mem-tools/src/operations.rs | 99 +- examples/cortex-mem-tars/src/app.rs | 32 +- examples/cortex-mem-tars/src/main.rs | 6 +- 12 files changed, 843 insertions(+), 1050 deletions(-) diff --git a/cortex-mem-core/src/automation/manager.rs b/cortex-mem-core/src/automation/manager.rs index eece9a3..f89128e 100644 --- a/cortex-mem-core/src/automation/manager.rs +++ b/cortex-mem-core/src/automation/manager.rs @@ -2,6 +2,7 @@ use crate::{ Result, automation::AutoIndexer, events::{CortexEvent, SessionEvent}, + memory_events::{ChangeType, MemoryEvent}, }; use std::collections::HashSet; use std::sync::Arc; @@ -35,10 +36,15 @@ impl Default for AutomationConfig { /// 自动化管理器 /// -/// 职责 -/// - 监听 `MessageAdded` 事件,将新消息内容(L2 级别)索引到向量数据库 +/// ## 职责 +/// 监听 `MessageAdded` 事件,将新消息内容(L2 级别)索引到向量数据库。 /// -/// 不再负责: +/// ## 事件系统集成 +/// - 输入:旧的 `EventBus`(`CortexEvent`)—— 来自 `SessionManager` 的消息通知 +/// - 输出(可选):向 `MemoryEventCoordinator` 的 `MemoryEvent::VectorSyncNeeded` 通道 +/// 发送索引请求,由协调器统一调度;若未配置则直接调用 `AutoIndexer`(兼容旧路径) +/// +/// ## 不再负责 /// - 记忆提取(由 `MemoryEventCoordinator` 统一处理) /// - L0/L1 层级文件生成(由 `CascadeLayerUpdater` 统一处理) /// - Session 关闭时的全量索引(由 `VectorSyncManager` 统一处理) @@ -47,16 +53,38 @@ pub struct AutomationManager { config: AutomationConfig, /// 并发限制信号量 semaphore: Arc, + /// Optional: 向 MemoryEventCoordinator 发送 VectorSyncNeeded 事件 + /// 若已配置,优先通过协调器调度,而非直接调用 AutoIndexer + memory_event_tx: Option>, } impl AutomationManager { - /// 创建自动化管理器 + /// 创建自动化管理器(兼容旧路径,不使用 MemoryEventCoordinator) pub fn new(indexer: Arc, config: AutomationConfig) 
-> Self { let semaphore = Arc::new(Semaphore::new(config.max_concurrent_tasks)); Self { indexer, config, semaphore, + memory_event_tx: None, + } + } + + /// 创建自动化管理器,并接入 MemoryEventCoordinator 通道 + /// + /// 推荐:当 `MemoryEventCoordinator` 可用时使用此构造函数, + /// 将 L2 索引请求路由到协调器,实现统一调度。 + pub fn with_memory_events( + indexer: Arc, + config: AutomationConfig, + memory_event_tx: mpsc::UnboundedSender, + ) -> Self { + let semaphore = Arc::new(Semaphore::new(config.max_concurrent_tasks)); + Self { + indexer, + config, + semaphore, + memory_event_tx: Some(memory_event_tx), } } @@ -155,7 +183,22 @@ impl AutomationManager { } /// 索引单个 session 的 L2 消息内容 + /// + /// 优先通过 `MemoryEventCoordinator` 调度(`VectorSyncNeeded` 事件); + /// 若未配置则直接调用 `AutoIndexer`(兼容旧路径)。 async fn index_session_l2(&self, session_id: &str) -> Result<()> { + // 优先路径:通过 MemoryEventCoordinator 统一调度 + if let Some(ref tx) = self.memory_event_tx { + let session_uri = format!("cortex://session/{}", session_id); + let _ = tx.send(MemoryEvent::VectorSyncNeeded { + file_uri: session_uri, + change_type: ChangeType::Update, + }); + info!("AutomationManager: dispatched VectorSyncNeeded for session {}", session_id); + return Ok(()); + } + + // 兼容路径:直接调用 AutoIndexer let _permit = self.semaphore.acquire().await; match self.indexer.index_thread(session_id).await { Ok(stats) => { diff --git a/cortex-mem-core/src/builder.rs b/cortex-mem-core/src/builder.rs index c90dd7b..1831164 100644 --- a/cortex-mem-core/src/builder.rs +++ b/cortex-mem-core/src/builder.rs @@ -94,74 +94,35 @@ impl CortexMemBuilder { }; // 3. 
初始化Qdrant向量存储(可选) - let vector_store: Option> = if let Some(ref cfg) = self.qdrant_config { - match QdrantVectorStore::new(cfg).await { - Ok(store) => { - info!("Qdrant vector store connected: {}", cfg.url); - Some(Arc::new(store)) - } - Err(e) => { - warn!("Failed to connect to Qdrant, vector search disabled: {}", e); - None + // 同时保留具体类型(供 MemoryEventCoordinator 使用)和 trait object(供 VectorStore 接口使用) + let (qdrant_store_typed, vector_store): (Option>, Option>) = + if let Some(ref cfg) = self.qdrant_config { + match QdrantVectorStore::new(cfg).await { + Ok(store) => { + info!("Qdrant vector store connected: {}", cfg.url); + let typed = Arc::new(store); + let dyn_store: Arc = typed.clone(); + (Some(typed), Some(dyn_store)) + } + Err(e) => { + warn!("Failed to connect to Qdrant, vector search disabled: {}", e); + (None, None) + } } - } - } else { - None - }; + } else { + (None, None) + }; // 4. 创建事件总线(用于向后兼容) let (event_bus, _old_event_rx) = EventBus::new(); let event_bus = Arc::new(event_bus); // 5. 
创建 MemoryEventCoordinator(如果配置了所有必需组件) - let (coordinator_handle, memory_event_tx) = - if let (Some(llm), Some(emb), Some(_vs)) = - (&self.llm_client, &embedding, &vector_store) + let (coordinator_handle, memory_event_tx) = + if let (Some(llm), Some(emb), Some(qdrant_store)) = + (&self.llm_client, &embedding, &qdrant_store_typed) { - // 将 VectorStore trait object 转换为 QdrantVectorStore - // 由于我们需要具体类型,这里重新从配置创建 - let qdrant_store = if let Some(ref cfg) = self.qdrant_config { - match QdrantVectorStore::new(cfg).await { - Ok(store) => Arc::new(store), - Err(e) => { - warn!("Failed to create QdrantVectorStore for coordinator: {}", e); - let fs = filesystem.clone(); - return Ok(CortexMem { - filesystem: fs.clone(), - session_manager: Arc::new(RwLock::new( - SessionManager::with_event_bus( - fs, - self.session_config, - event_bus.as_ref().clone(), - ) - )), - embedding, - vector_store, - llm_client: self.llm_client, - event_bus, - coordinator_handle: None, - }); - } - } - } else { - warn!("No Qdrant config available for coordinator"); - let fs = filesystem.clone(); - return Ok(CortexMem { - filesystem: fs.clone(), - session_manager: Arc::new(RwLock::new( - SessionManager::with_event_bus( - fs, - self.session_config, - event_bus.as_ref().clone(), - ) - )), - embedding, - vector_store, - llm_client: self.llm_client, - event_bus, - coordinator_handle: None, - }); - }; + let qdrant_store = qdrant_store.clone(); let config = self.coordinator_config.unwrap_or_default(); let (coordinator, tx, rx) = MemoryEventCoordinator::new_with_config( @@ -183,7 +144,9 @@ impl CortexMemBuilder { }; // 6. 创建SessionManager(带 memory_event_tx) - let session_manager = if let Some(tx) = memory_event_tx { + // Clone the sender so we can keep one for CortexMem's public getter. 
+ let memory_event_tx_for_session = memory_event_tx.clone(); + let session_manager = if let Some(tx) = memory_event_tx_for_session { // 使用 MemoryEventCoordinator 的事件通道 if let Some(ref llm) = self.llm_client { SessionManager::with_llm_and_events( @@ -228,6 +191,8 @@ impl CortexMemBuilder { vector_store, llm_client: self.llm_client, event_bus, + qdrant_store_typed, + memory_event_tx, coordinator_handle, }) } @@ -242,6 +207,10 @@ pub struct CortexMem { pub llm_client: Option>, #[allow(dead_code)] event_bus: Arc, + /// Typed Qdrant store (for consumers that need Arc) + qdrant_store_typed: Option>, + /// Memory event sender (for VectorSearchEngine / AutomationManager wiring) + memory_event_tx: Option>, /// MemoryEventCoordinator 的后台任务句柄 coordinator_handle: Option>, } @@ -272,6 +241,18 @@ impl CortexMem { self.llm_client.clone() } + /// 获取具体类型的 Qdrant 存储(供需要 Arc 的消费者使用) + pub fn qdrant_store(&self) -> Option> { + self.qdrant_store_typed.clone() + } + + /// 获取 memory event sender(用于 VectorSearchEngine / AutomationManager 接入遗忘机制) + pub fn memory_event_tx( + &self, + ) -> Option> { + self.memory_event_tx.clone() + } + /// 优雅关闭 pub async fn shutdown(self) -> Result<()> { info!("Shutting down CortexMem..."); diff --git a/cortex-mem-core/src/incremental_memory_updater.rs b/cortex-mem-core/src/incremental_memory_updater.rs index c611958..870dd9f 100644 --- a/cortex-mem-core/src/incremental_memory_updater.rs +++ b/cortex-mem-core/src/incremental_memory_updater.rs @@ -2,6 +2,13 @@ //! //! Handles incremental updates to memories with version tracking. //! Supports create, update, delete operations with proper deduplication. +//! +//! ## Generic Processing +//! +//! All memory types share the same update flow via the `MemoryItem` trait: +//! `find_existing → format_content → hash → should_update? → create / update` +//! This eliminates per-type boilerplate and keeps each memory type as a thin +//! trait implementation. 
use crate::filesystem::{CortexFilesystem, FilesystemOperations}; use crate::llm::LLMClient; @@ -17,6 +24,161 @@ use std::sync::Arc; use tokio::sync::mpsc; use tracing::{debug, info}; +// ──────────────────────────────────────────────────────────────────────────── +// MemoryItem trait — the single abstraction that replaces 8 process_xxx fns +// ──────────────────────────────────────────────────────────────────────────── + +/// Trait implemented by every extracted memory type. +/// +/// This is the key abstraction that allows `IncrementalMemoryUpdater` to handle +/// all memory types through a single generic `process_items` method. +pub trait MemoryItem { + /// The primary key used for matching existing memories (e.g. topic, name, person) + fn key(&self) -> String; + + /// Which `MemoryType` this item maps to + fn memory_type(&self) -> MemoryType; + + /// Confidence score (0.0–1.0) + fn confidence(&self) -> f32; + + /// Render the memory as Markdown content + fn format_content(&self) -> String; + + /// ID prefix used when creating a new memory (e.g. "pref", "entity") + fn id_prefix(&self) -> &'static str; + + /// Sub-directory under the scope root where files are stored (e.g. 
"preferences") + fn file_dir(&self) -> &'static str; +} + +// ── Implementations ───────────────────────────────────────────────────────── + +impl MemoryItem for PreferenceMemory { + fn key(&self) -> String { self.topic.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Preference } + fn confidence(&self) -> f32 { self.confidence } + fn id_prefix(&self) -> &'static str { "pref" } + fn file_dir(&self) -> &'static str { "preferences" } + fn format_content(&self) -> String { + format!( + "# {}\n\n{}\n\n**Confidence**: {:.2}", + self.topic, self.preference, self.confidence + ) + } +} + +impl MemoryItem for EntityMemory { + fn key(&self) -> String { self.name.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Entity } + fn confidence(&self) -> f32 { 0.9 } + fn id_prefix(&self) -> &'static str { "entity" } + fn file_dir(&self) -> &'static str { "entities" } + fn format_content(&self) -> String { + format!( + "# {}\n\n**Type**: {}\n\n**Description**: {}\n\n**Context**: {}", + self.name, self.entity_type, self.description, self.context + ) + } +} + +impl MemoryItem for EventMemory { + fn key(&self) -> String { self.title.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Event } + fn confidence(&self) -> f32 { 0.8 } + fn id_prefix(&self) -> &'static str { "event" } + fn file_dir(&self) -> &'static str { "events" } + fn format_content(&self) -> String { + let timestamp = self.timestamp.as_deref().unwrap_or("N/A"); + format!( + "# {}\n\n**Type**: {}\n\n**Summary**: {}\n\n**Timestamp**: {}", + self.title, self.event_type, self.summary, timestamp + ) + } +} + +impl MemoryItem for CaseMemory { + fn key(&self) -> String { self.title.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Case } + fn confidence(&self) -> f32 { 0.9 } + fn id_prefix(&self) -> &'static str { "case" } + fn file_dir(&self) -> &'static str { "cases" } + fn format_content(&self) -> String { + let lessons = self + .lessons_learned + .iter() + .map(|l| format!("- 
{}", l)) + .collect::>() + .join("\n"); + format!( + "# {}\n\n## Problem\n\n{}\n\n## Solution\n\n{}\n\n## Lessons Learned\n\n{}", + self.title, self.problem, self.solution, lessons + ) + } +} + +impl MemoryItem for PersonalInfoMemory { + fn key(&self) -> String { self.category.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::PersonalInfo } + fn confidence(&self) -> f32 { self.confidence } + fn id_prefix(&self) -> &'static str { "info" } + fn file_dir(&self) -> &'static str { "personal_info" } + fn format_content(&self) -> String { + format!( + "# {}\n\n{}\n\n**Confidence**: {:.2}", + self.category, self.content, self.confidence + ) + } +} + +impl MemoryItem for WorkHistoryMemory { + fn key(&self) -> String { format!("{}_{}", self.company, self.role) } + fn memory_type(&self) -> MemoryType { MemoryType::WorkHistory } + fn confidence(&self) -> f32 { self.confidence } + fn id_prefix(&self) -> &'static str { "work" } + fn file_dir(&self) -> &'static str { "work_history" } + fn format_content(&self) -> String { + let duration = self.duration.as_deref().unwrap_or("N/A"); + format!( + "# {} - {}\n\n**Duration**: {}\n\n**Description**: {}\n\n**Confidence**: {:.2}", + self.company, self.role, duration, self.description, self.confidence + ) + } +} + +impl MemoryItem for RelationshipMemory { + fn key(&self) -> String { self.person.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Relationship } + fn confidence(&self) -> f32 { self.confidence } + fn id_prefix(&self) -> &'static str { "rel" } + fn file_dir(&self) -> &'static str { "relationships" } + fn format_content(&self) -> String { + format!( + "# {}\n\n**Type**: {}\n\n**Context**: {}\n\n**Confidence**: {:.2}", + self.person, self.relation_type, self.context, self.confidence + ) + } +} + +impl MemoryItem for GoalMemory { + fn key(&self) -> String { self.goal.clone() } + fn memory_type(&self) -> MemoryType { MemoryType::Goal } + fn confidence(&self) -> f32 { self.confidence } + fn id_prefix(&self) 
-> &'static str { "goal" }
+    fn file_dir(&self) -> &'static str { "goals" }
+    fn format_content(&self) -> String {
+        let timeline = self.timeline.as_deref().unwrap_or("未指定");
+        format!(
+            "# {}\n\n**Category**: {}\n\n**Timeline**: {}\n\n**Confidence**: {:.2}",
+            self.goal, self.category, timeline, self.confidence
+        )
+    }
+}
+
+// ────────────────────────────────────────────────────────────────────────────
+// IncrementalMemoryUpdater
+// ────────────────────────────────────────────────────────────────────────────
+
 /// Incremental Memory Updater
 ///
 /// Handles incremental updates to user and agent memories.
@@ -58,17 +220,19 @@ impl IncrementalMemoryUpdater {
         extracted: &ExtractedMemories,
     ) -> Result<MemoryUpdateResult> {
         let mut result = MemoryUpdateResult::default();
-
-        // Process each memory type
-        self.process_preferences(&mut result, user_id, session_id, &extracted.preferences).await?;
-        self.process_entities(&mut result, user_id, session_id, &extracted.entities).await?;
-        self.process_events(&mut result, user_id, session_id, &extracted.events).await?;
-        self.process_cases(&mut result, agent_id, session_id, &extracted.cases).await?;
-        self.process_personal_info(&mut result, user_id, session_id, &extracted.personal_info).await?;
-        self.process_work_history(&mut result, user_id, session_id, &extracted.work_history).await?;
-        self.process_relationships(&mut result, user_id, session_id, &extracted.relationships).await?;
-        self.process_goals(&mut result, user_id, session_id, &extracted.goals).await?;
-
+
+        // Process user-scoped memory types
+        self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.preferences).await?;
+        self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.entities).await?;
+        self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.events).await?;
+        self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.personal_info).await?;
+        self.process_items(&mut
result, &MemoryScope::User, user_id, session_id, &extracted.work_history).await?; + self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.relationships).await?; + self.process_items(&mut result, &MemoryScope::User, user_id, session_id, &extracted.goals).await?; + + // Process agent-scoped memory types + self.process_items(&mut result, &MemoryScope::Agent, agent_id, session_id, &extracted.cases).await?; + // Record session extraction summary self.index_manager.record_session_extraction( &MemoryScope::User, @@ -77,953 +241,214 @@ impl IncrementalMemoryUpdater { result.created_ids.clone(), result.updated_ids.clone(), ).await?; - + info!( "Memory update complete for session {}: {} created, {} updated, {} deleted", session_id, result.created, result.updated, result.deleted ); - + Ok(result) } - /// Process preference memories - async fn process_preferences( + // ──────────────────────────────────────────────────────────────────────── + // Generic processing — the heart of the deduplication + // ──────────────────────────────────────────────────────────────────────── + + /// Process a slice of `MemoryItem` values through the standard pipeline: + /// find-existing → compare → create / update. 
+ async fn process_items( &self, result: &mut MemoryUpdateResult, - user_id: &str, + scope: &MemoryScope, + owner_id: &str, session_id: &str, - preferences: &[PreferenceMemory], + items: &[T], ) -> Result<()> { - for pref in preferences { - let key = &pref.topic; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::Preference, key) - .await?; - - let content = self.format_preference_content(pref); + for item in items { + let key = item.key(); + let memory_type = item.memory_type(); + let confidence = item.confidence(); + let content = item.format_content(); let content_hash = MemoryIndexManager::calculate_content_hash(&content); let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - // Check if update is needed - if self.should_update(&existing_meta, pref.confidence, &content_hash, &content_summary).await? { - // Update existing memory - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - pref.confidence, - ).await?; - } - } - None => { - // Create new memory - self.create_preference(result, user_id, session_id, pref, content, content_hash, content_summary).await?; - } - } - } - Ok(()) - } - /// Process entity memories - async fn process_entities( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - entities: &[EntityMemory], - ) -> Result<()> { - for entity in entities { - let key = &entity.name; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::Entity, key) + let existing = self + .index_manager + .find_matching_memory(scope, owner_id, &memory_type, &key) .await?; - - let content = self.format_entity_content(entity); - let content_hash = MemoryIndexManager::calculate_content_hash(&content); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - + match 
existing { Some(existing_meta) => { - if self.should_update_entity(&existing_meta, entity, &content_hash).await? { - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - 0.9, + if self.should_update(&existing_meta, confidence, &content_hash, &content_summary).await? { + self.do_update_memory( + result, scope, owner_id, session_id, + existing_meta, content, content_hash, content_summary, confidence, ).await?; } } None => { - self.create_entity(result, user_id, session_id, entity, content, content_hash).await?; + self.do_create_memory( + result, scope, owner_id, session_id, + item, content, content_hash, content_summary, + ).await?; } } } Ok(()) } - /// Process event memories - async fn process_events( + // ──────────────────────────────────────────────────────────────────────── + // Create / Update / Delete — scope-agnostic helpers + // ──────────────────────────────────────────────────────────────────────── + + /// Create a new memory (works for any scope) + async fn do_create_memory( &self, result: &mut MemoryUpdateResult, - user_id: &str, + scope: &MemoryScope, + owner_id: &str, session_id: &str, - events: &[EventMemory], + item: &T, + content: String, + content_hash: String, + content_summary: String, ) -> Result<()> { - for event in events { - let key = &event.title; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::Event, key) - .await?; - - let content = self.format_event_content(event); - let content_hash = MemoryIndexManager::calculate_content_hash(&content); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - if self.should_update(&existing_meta, 0.8, &content_hash, &content_summary).await? 
{ - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - 0.8, - ).await?; - } - } - None => { - self.create_event(result, user_id, session_id, event, content, content_hash).await?; - } - } - } + let memory_id = format!( + "{}_{}", + item.id_prefix(), + uuid::Uuid::new_v4().to_string().split('-').next().unwrap() + ); + let file_path = format!("{}/{}.md", item.file_dir(), memory_id); + // MemoryScope implements Display as lowercase ("user", "agent", ...) + let file_uri = format!("cortex://{}/{}/{}", scope, owner_id, file_path); + + // Write content + let timestamped_content = Self::add_timestamp(&content); + self.filesystem.write(&file_uri, ×tamped_content).await?; + + // Create metadata + let metadata = MemoryMetadata::new( + memory_id.clone(), + file_path, + item.memory_type(), + item.key(), + content_hash, + session_id, + item.confidence(), + content_summary, + ); + + // Update index + self.index_manager.upsert_memory(scope, owner_id, metadata).await?; + + // Emit event + let _ = self.event_tx.send(MemoryEvent::MemoryCreated { + scope: scope.clone(), + owner_id: owner_id.to_string(), + memory_id: memory_id.clone(), + memory_type: item.memory_type(), + key: item.key(), + source_session: session_id.to_string(), + file_uri, + }); + + result.created += 1; + result.created_ids.push(memory_id); + Ok(()) } - /// Process agent case memories - async fn process_cases( + /// Update an existing memory (works for any scope) + async fn do_update_memory( &self, result: &mut MemoryUpdateResult, - agent_id: &str, + scope: &MemoryScope, + owner_id: &str, session_id: &str, - cases: &[CaseMemory], + existing: MemoryMetadata, + content: String, + content_hash: String, + content_summary: String, + confidence: f32, ) -> Result<()> { - for case in cases { - let key = &case.title; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::Agent, agent_id, &MemoryType::Case, key) - .await?; - - let content = 
self.format_case_content(case); - let content_hash = MemoryIndexManager::calculate_content_hash(&content); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - if self.should_update(&existing_meta, 0.8, &content_hash, &content_summary).await? { - self.update_memory_agent( - result, - agent_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - ).await?; - } - } - None => { - self.create_case(result, agent_id, session_id, case, content, content_hash).await?; - } - } - } + // MemoryScope implements Display as lowercase ("user", "agent", ...) + let file_uri = format!("cortex://{}/{}/{}", scope, owner_id, existing.file); + let memory_id = existing.id.clone(); + let old_hash = existing.content_hash.clone(); + let new_hash = content_hash.clone(); + + // Write updated content + let timestamped_content = Self::add_timestamp(&content); + self.filesystem.write(&file_uri, ×tamped_content).await?; + + // Update metadata + let mut updated_meta = existing.clone(); + updated_meta.update(content_hash, session_id, confidence, content_summary); + + // Update index + self.index_manager.upsert_memory(scope, owner_id, updated_meta).await?; + + // Emit event + let _ = self.event_tx.send(MemoryEvent::MemoryUpdated { + scope: scope.clone(), + owner_id: owner_id.to_string(), + memory_id: memory_id.clone(), + memory_type: existing.memory_type.clone(), + key: existing.key.clone(), + source_session: session_id.to_string(), + file_uri: file_uri.clone(), + old_content_hash: old_hash, + new_content_hash: new_hash, + }); + + result.updated += 1; + result.updated_ids.push(memory_id.clone()); + + debug!("Updated memory {} for {}/{}", memory_id, scope, owner_id); Ok(()) } - /// Process personal info memories - async fn process_personal_info( + // ──────────────────────────────────────────────────────────────────────── + // Decision helpers + // 
────────────────────────────────────────────────────────────────────────
+
+    /// Check if an existing memory should be updated
+    async fn should_update(
         &self,
-        result: &mut MemoryUpdateResult,
-        user_id: &str,
-        session_id: &str,
-        personal_info: &[PersonalInfoMemory],
-    ) -> Result<()> {
-        for info in personal_info {
-            let key = &info.category;
-            let existing = self.index_manager
-                .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::PersonalInfo, key)
-                .await?;
-
-            let content = self.format_personal_info_content(info);
-            let content_hash = MemoryIndexManager::calculate_content_hash(&content);
-            let content_summary = MemoryIndexManager::generate_content_summary(&content, 200);
-
-            match existing {
-                Some(existing_meta) => {
-                    if self.should_update(&existing_meta, info.confidence, &content_hash, &content_summary).await? {
-                        self.update_memory(
-                            result,
-                            user_id,
-                            session_id,
-                            existing_meta,
-                            content,
-                            content_hash,
-                            content_summary,
-                            info.confidence,
-                        ).await?;
-                    }
-                }
-                None => {
-                    self.create_personal_info(result, user_id, session_id, info, content, content_hash).await?;
-                }
-            }
+        existing: &MemoryMetadata,
+        new_confidence: f32,
+        new_hash: &str,
+        new_summary: &str,
+    ) -> Result<bool> {
+        // Update if new confidence is significantly higher
+        if new_confidence > existing.confidence + 0.1 {
+            return Ok(true);
         }
-        Ok(())
-    }

-    /// Process work history memories
-    async fn process_work_history(
-        &self,
-        result: &mut MemoryUpdateResult,
-        user_id: &str,
-        session_id: &str,
-        work_history: &[WorkHistoryMemory],
-    ) -> Result<()> {
-        for work in work_history {
-            let key = format!("{}_{}", work.company, work.role);
-            let existing = self.index_manager
-                .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::WorkHistory, &key)
-                .await?;
-
-            let content = self.format_work_history_content(work);
-            let content_hash = MemoryIndexManager::calculate_content_hash(&content);
-            let content_summary = 
MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - if self.should_update(&existing_meta, work.confidence, &content_hash, &content_summary).await? { - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - work.confidence, - ).await?; - } - } - None => { - self.create_work_history(result, user_id, session_id, work, content, content_hash).await?; - } - } + // Update if content changed + if MemoryIndexManager::content_changed( + &existing.content_hash, + new_hash, + &existing.content_summary, + new_summary, + ) { + return Ok(true); } - Ok(()) + + Ok(false) + } + + // ──────────────────────────────────────────────────────────────────────── + // Utility + // ──────────────────────────────────────────────────────────────────────── + + fn add_timestamp(content: &str) -> String { + let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); + format!("{}\n\n**Added**: {}", content, timestamp) } - /// Process relationship memories - async fn process_relationships( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - relationships: &[RelationshipMemory], - ) -> Result<()> { - for rel in relationships { - let key = &rel.person; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::Relationship, key) - .await?; - - let content = self.format_relationship_content(rel); - let content_hash = MemoryIndexManager::calculate_content_hash(&content); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - if self.should_update(&existing_meta, rel.confidence, &content_hash, &content_summary).await? 
{ - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - rel.confidence, - ).await?; - } - } - None => { - self.create_relationship(result, user_id, session_id, rel, content, content_hash).await?; - } - } - } - Ok(()) - } - - /// Process goal memories - async fn process_goals( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - goals: &[GoalMemory], - ) -> Result<()> { - for goal in goals { - let key = &goal.goal; - let existing = self.index_manager - .find_matching_memory(&MemoryScope::User, user_id, &MemoryType::Goal, key) - .await?; - - let content = self.format_goal_content(goal); - let content_hash = MemoryIndexManager::calculate_content_hash(&content); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - match existing { - Some(existing_meta) => { - if self.should_update(&existing_meta, goal.confidence, &content_hash, &content_summary).await? { - self.update_memory( - result, - user_id, - session_id, - existing_meta, - content, - content_hash, - content_summary, - goal.confidence, - ).await?; - } - } - None => { - self.create_goal(result, user_id, session_id, goal, content, content_hash).await?; - } - } - } - Ok(()) - } - - /// Check if an existing memory should be updated - async fn should_update( - &self, - existing: &MemoryMetadata, - new_confidence: f32, - new_hash: &str, - new_summary: &str, - ) -> Result { - // Update if new confidence is significantly higher - if new_confidence > existing.confidence + 0.1 { - return Ok(true); - } - - // Update if content changed - if MemoryIndexManager::content_changed( - &existing.content_hash, - new_hash, - &existing.content_summary, - new_summary, - ) { - return Ok(true); - } - - Ok(false) - } - - /// Check if entity should be updated (with context comparison) - async fn should_update_entity( - &self, - existing: &MemoryMetadata, - _new_entity: &EntityMemory, - new_hash: &str, - ) -> 
Result { - // Always update if content hash changed - if existing.content_hash != new_hash { - return Ok(true); - } - - // Update if entity type or description differs significantly - // This is a simplified check - can be enhanced with LLM comparison - Ok(false) - } - - /// Update an existing memory - async fn update_memory( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - existing: MemoryMetadata, - content: String, - content_hash: String, - content_summary: String, - confidence: f32, - ) -> Result<()> { - let file_uri = format!("cortex://user/{}/{}", user_id, existing.file); - let memory_id = existing.id.clone(); - let old_hash = existing.content_hash.clone(); - let new_hash = content_hash.clone(); - - // Write updated content - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - // Update metadata - let mut updated_meta = existing.clone(); - updated_meta.update(content_hash, session_id, confidence, content_summary); - - // Update index - self.index_manager.upsert_memory(&MemoryScope::User, user_id, updated_meta).await?; - - // Emit event - let _ = self.event_tx.send(MemoryEvent::MemoryUpdated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: existing.memory_type.clone(), - key: existing.key.clone(), - source_session: session_id.to_string(), - file_uri: file_uri.clone(), - old_content_hash: old_hash, - new_content_hash: new_hash, - }); - - result.updated += 1; - result.updated_ids.push(memory_id.clone()); - - debug!("Updated memory {} for user {}", memory_id, user_id); - Ok(()) - } - - /// Update agent memory - async fn update_memory_agent( - &self, - result: &mut MemoryUpdateResult, - agent_id: &str, - session_id: &str, - existing: MemoryMetadata, - content: String, - content_hash: String, - content_summary: String, - ) -> Result<()> { - let file_uri = format!("cortex://agent/{}/{}", agent_id, 
existing.file); - let memory_id = existing.id.clone(); - let old_hash = existing.content_hash.clone(); - let new_hash = content_hash.clone(); - - // Write updated content - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - // Update metadata - let mut updated_meta = existing.clone(); - updated_meta.update(content_hash, session_id, 0.9, content_summary); - - // Update index - self.index_manager.upsert_memory(&MemoryScope::Agent, agent_id, updated_meta).await?; - - // Emit event - let _ = self.event_tx.send(MemoryEvent::MemoryUpdated { - scope: MemoryScope::Agent, - owner_id: agent_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Case, - key: existing.key.clone(), - source_session: session_id.to_string(), - file_uri: file_uri.clone(), - old_content_hash: old_hash, - new_content_hash: new_hash, - }); - - result.updated += 1; - result.updated_ids.push(memory_id.clone()); - - Ok(()) - } - - // Create methods for each memory type - async fn create_preference( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - pref: &PreferenceMemory, - content: String, - content_hash: String, - content_summary: String, - ) -> Result<()> { - let memory_id = format!("pref_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("preferences/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - - // Write content - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - // Create metadata - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Preference, - pref.topic.clone(), - content_hash, - session_id, - pref.confidence, - content_summary, - ); - - // Update index - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - // Emit event - let _ = 
self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Preference, - key: pref.topic.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_entity( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - entity: &EntityMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("entity_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("entities/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Entity, - entity.name.clone(), - content_hash, - session_id, - 0.9, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Entity, - key: entity.name.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_event( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - event: &EventMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("event_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("events/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, 
file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Event, - event.title.clone(), - content_hash, - session_id, - 0.8, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Event, - key: event.title.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_case( - &self, - result: &mut MemoryUpdateResult, - agent_id: &str, - session_id: &str, - case: &CaseMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("case_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("cases/{}.md", memory_id); - let file_uri = format!("cortex://agent/{}/{}", agent_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Case, - case.title.clone(), - content_hash, - session_id, - 0.9, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::Agent, agent_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::Agent, - owner_id: agent_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Case, - key: case.title.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - 
result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_personal_info( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - info: &PersonalInfoMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("info_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("personal_info/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::PersonalInfo, - info.category.clone(), - content_hash, - session_id, - info.confidence, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::PersonalInfo, - key: info.category.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_work_history( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - work: &WorkHistoryMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("work_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("work_history/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, 
×tamped_content).await?; - - let key = format!("{}_{}", work.company, work.role); - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::WorkHistory, - key, - content_hash, - session_id, - work.confidence, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::WorkHistory, - key: format!("{}_{}", work.company, work.role), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - async fn create_relationship( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - rel: &RelationshipMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("rel_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("relationships/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Relationship, - rel.person.clone(), - content_hash, - session_id, - rel.confidence, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Relationship, - key: rel.person.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) 
- } - - async fn create_goal( - &self, - result: &mut MemoryUpdateResult, - user_id: &str, - session_id: &str, - goal: &GoalMemory, - content: String, - content_hash: String, - ) -> Result<()> { - let memory_id = format!("goal_{}", uuid::Uuid::new_v4().to_string().split('-').next().unwrap()); - let file_path = format!("goals/{}.md", memory_id); - let file_uri = format!("cortex://user/{}/{}", user_id, file_path); - let content_summary = MemoryIndexManager::generate_content_summary(&content, 200); - - let timestamped_content = self.add_timestamp(&content); - self.filesystem.write(&file_uri, ×tamped_content).await?; - - let metadata = MemoryMetadata::new( - memory_id.clone(), - file_path, - MemoryType::Goal, - goal.goal.clone(), - content_hash, - session_id, - goal.confidence, - content_summary, - ); - - self.index_manager.upsert_memory(&MemoryScope::User, user_id, metadata).await?; - - let _ = self.event_tx.send(MemoryEvent::MemoryCreated { - scope: MemoryScope::User, - owner_id: user_id.to_string(), - memory_id: memory_id.clone(), - memory_type: MemoryType::Goal, - key: goal.goal.clone(), - source_session: session_id.to_string(), - file_uri, - }); - - result.created += 1; - result.created_ids.push(memory_id); - - Ok(()) - } - - // Content formatting methods - fn format_preference_content(&self, pref: &PreferenceMemory) -> String { - format!( - "# {}\n\n{}\n\n**Confidence**: {:.2}", - pref.topic, - pref.preference, - pref.confidence - ) - } - - fn format_entity_content(&self, entity: &EntityMemory) -> String { - format!( - "# {}\n\n**Type**: {}\n\n**Description**: {}\n\n**Context**: {}", - entity.name, - entity.entity_type, - entity.description, - entity.context - ) - } - - fn format_event_content(&self, event: &EventMemory) -> String { - let timestamp = event.timestamp.as_deref().unwrap_or("N/A"); - format!( - "# {}\n\n**Type**: {}\n\n**Summary**: {}\n\n**Timestamp**: {}", - event.title, - event.event_type, - event.summary, - timestamp - ) - } - - fn 
format_case_content(&self, case: &CaseMemory) -> String { - let lessons = case - .lessons_learned - .iter() - .map(|l| format!("- {}", l)) - .collect::>() - .join("\n"); - - format!( - "# {}\n\n## Problem\n\n{}\n\n## Solution\n\n{}\n\n## Lessons Learned\n\n{}", - case.title, - case.problem, - case.solution, - lessons - ) - } - - fn format_personal_info_content(&self, info: &PersonalInfoMemory) -> String { - format!( - "# {}\n\n{}\n\n**Confidence**: {:.2}", - info.category, - info.content, - info.confidence - ) - } - - fn format_work_history_content(&self, work: &WorkHistoryMemory) -> String { - let duration = work.duration.as_deref().unwrap_or("N/A"); - format!( - "# {} - {}\n\n**Duration**: {}\n\n**Description**: {}\n\n**Confidence**: {:.2}", - work.company, - work.role, - duration, - work.description, - work.confidence - ) - } - - fn format_relationship_content(&self, rel: &RelationshipMemory) -> String { - format!( - "# {}\n\n**Type**: {}\n\n**Context**: {}\n\n**Confidence**: {:.2}", - rel.person, - rel.relation_type, - rel.context, - rel.confidence - ) - } - - fn format_goal_content(&self, goal: &GoalMemory) -> String { - let timeline = goal.timeline.as_deref().unwrap_or("未指定"); - format!( - "# {}\n\n**Category**: {}\n\n**Timeline**: {}\n\n**Confidence**: {:.2}", - goal.goal, - goal.category, - timeline, - goal.confidence - ) - } - - fn add_timestamp(&self, content: &str) -> String { - let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); - format!("{}\n\n**Added**: {}", content, timestamp) - } - - /// Delete a memory - pub async fn delete_memory( + /// Delete a memory + pub async fn delete_memory( &self, scope: &MemoryScope, owner_id: &str, @@ -1032,28 +457,19 @@ impl IncrementalMemoryUpdater { ) -> Result { // Get metadata first let index = self.index_manager.load_index(scope.clone(), owner_id.to_string()).await?; - + if let Some(metadata) = index.memories.get(memory_id).cloned() { - let file_uri = format!( - "cortex://{}/{}/{}", - match scope { 
- MemoryScope::User => "user", - MemoryScope::Agent => "agent", - MemoryScope::Session => "session", - MemoryScope::Resources => "resources", - }, - owner_id, - metadata.file - ); - + // MemoryScope implements Display as lowercase ("user", "agent", ...) + let file_uri = format!("cortex://{}/{}/{}", scope, owner_id, metadata.file); + // Delete file if self.filesystem.exists(&file_uri).await? { self.filesystem.delete(&file_uri).await?; } - + // Remove from index self.index_manager.remove_memory(scope, owner_id, memory_id).await?; - + // Emit event let _ = self.event_tx.send(MemoryEvent::MemoryDeleted { scope: scope.clone(), @@ -1063,7 +479,7 @@ impl IncrementalMemoryUpdater { file_uri, reason, }); - + Ok(true) } else { Ok(false) diff --git a/cortex-mem-core/src/lib.rs b/cortex-mem-core/src/lib.rs index 85b4aa4..1f20c06 100644 --- a/cortex-mem-core/src/lib.rs +++ b/cortex-mem-core/src/lib.rs @@ -112,7 +112,7 @@ pub use memory_events::{ ChangeType, DeleteReason, EventStats, MemoryEvent, }; pub use memory_index_manager::MemoryIndexManager; -pub use incremental_memory_updater::IncrementalMemoryUpdater; +pub use incremental_memory_updater::{IncrementalMemoryUpdater, MemoryItem}; pub use cascade_layer_updater::{CascadeLayerUpdater, UpdateStats}; pub use cascade_layer_debouncer::{LayerUpdateDebouncer, DebouncerConfig}; // Phase 2 pub use llm_result_cache::{LlmResultCache, CacheConfig, CacheStats}; // Phase 3 diff --git a/cortex-mem-core/src/memory_cleanup.rs b/cortex-mem-core/src/memory_cleanup.rs index fc0c44d..4b4e9c2 100644 --- a/cortex-mem-core/src/memory_cleanup.rs +++ b/cortex-mem-core/src/memory_cleanup.rs @@ -2,10 +2,15 @@ //! //! 负责定期扫描记忆索引,根据记忆强度(Ebbinghaus 遗忘曲线) //! 将低强度记忆归档或彻底删除,控制长程 Agent 的记忆空间膨胀。 +//! +//! ## 向量同步 +//! 删除记忆时会同步从 Qdrant 向量库中移除对应向量(所有 L0/L1/L2 层), +//! 
确保归档/遗忘后的记忆不再出现在语义检索结果中。 use crate::{ memory_index::{MemoryMetadata, MemoryScope}, memory_index_manager::MemoryIndexManager, + vector_sync_manager::VectorSyncManager, Result, }; use std::sync::Arc; @@ -48,20 +53,27 @@ pub struct CleanupStats { /// 使用方式: /// ```rust,no_run /// // 在 Agent 启动时创建,定期手动调用 run_cleanup,或用 tokio::spawn + interval 运行 -/// let svc = MemoryCleanupService::new(index_manager, MemoryCleanupConfig::default()); +/// let svc = MemoryCleanupService::new(index_manager, config, Some(vector_sync)); /// let stats = svc.run_cleanup(&MemoryScope::User, "alice").await?; /// println!("Archived: {}, Deleted: {}", stats.archived, stats.deleted); /// ``` pub struct MemoryCleanupService { index_manager: Arc, config: MemoryCleanupConfig, + /// Optional vector sync manager for cleaning up Qdrant vectors on delete + vector_sync: Option>, } impl MemoryCleanupService { - pub fn new(index_manager: Arc, config: MemoryCleanupConfig) -> Self { + pub fn new( + index_manager: Arc, + config: MemoryCleanupConfig, + vector_sync: Option>, + ) -> Self { Self { index_manager, config, + vector_sync, } } @@ -125,6 +137,27 @@ impl MemoryCleanupService { "Deleting archived memory '{}' (strength < {:.3}, key='{}')", id, self.config.delete_threshold, meta.key ); + // Sync-delete vectors from Qdrant so the memory no longer + // appears in semantic search results. + // MemoryScope implements Display as lowercase ("user", "agent", ...) 
+ if let Some(ref vs) = self.vector_sync { + let file_uri = format!( + "cortex://{}/{}/{}", + scope, owner_id, meta.file + ); + if let Err(e) = vs + .sync_file_change( + &file_uri, + crate::memory_events::ChangeType::Delete, + ) + .await + { + warn!( + "Failed to delete vectors for memory '{}': {}", + id, e + ); + } + } } } self.index_manager.save_index(&index).await?; @@ -136,6 +169,11 @@ impl MemoryCleanupService { scope, owner_id, stats.total_scanned, stats.archived, stats.deleted ); + // Invalidate the in-memory cache so subsequent loads see the updated index. + // This is important when multiple MemoryIndexManager instances share the same + // filesystem (e.g., cortex-mem-tools and cortex-mem-service in the same process). + self.index_manager.invalidate_cache(scope, owner_id).await; + Ok(stats) } diff --git a/cortex-mem-core/src/memory_index_manager.rs b/cortex-mem-core/src/memory_index_manager.rs index 003c60d..2b4d2b0 100644 --- a/cortex-mem-core/src/memory_index_manager.rs +++ b/cortex-mem-core/src/memory_index_manager.rs @@ -179,13 +179,14 @@ impl MemoryIndexManager { } /// Calculate content hash for change detection + /// + /// Uses SHA-256 to ensure stable hashes across Rust versions and platforms. 
+ /// (DefaultHasher is not guaranteed to be stable across versions) pub fn calculate_content_hash(content: &str) -> String { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - - let mut hasher = DefaultHasher::new(); - content.hash(&mut hasher); - format!("{:x}", hasher.finish()) + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + hasher.update(content.as_bytes()); + format!("{:x}", hasher.finalize()) } /// Generate a summary from content (for quick comparison) diff --git a/cortex-mem-core/src/search/vector_engine.rs b/cortex-mem-core/src/search/vector_engine.rs index ef28dcc..011e59a 100644 --- a/cortex-mem-core/src/search/vector_engine.rs +++ b/cortex-mem-core/src/search/vector_engine.rs @@ -3,6 +3,9 @@ use crate::{ embedding::EmbeddingClient, filesystem::CortexFilesystem, llm::LLMClient, + memory_events::MemoryEvent, + memory_index::MemoryScope, + memory_index_manager::MemoryIndexManager, vector_store::{QdrantVectorStore, VectorStore, uri_to_vector_id}, }; use crate::llm::prompts::Prompts; @@ -10,6 +13,7 @@ use super::{EnhancedQueryIntent, QueryIntentType, TimeConstraint}; use super::weight_model; use serde::{Deserialize, Serialize}; use std::sync::Arc; +use tokio::sync::mpsc; use tracing::{debug, info, warn}; /// Search options @@ -56,6 +60,10 @@ pub struct VectorSearchEngine { filesystem: Arc, /// Optional LLM client for intent analysis llm_client: Option>, + /// Optional event sender for MemoryAccessed events (drives forgetting mechanism) + memory_event_tx: Option>, + /// Optional index manager for archived-memory filtering + index_manager: Option>, } impl VectorSearchEngine { @@ -70,6 +78,8 @@ impl VectorSearchEngine { embedding, filesystem, llm_client: None, + memory_event_tx: None, + index_manager: None, } } @@ -85,9 +95,158 @@ impl VectorSearchEngine { embedding, filesystem, llm_client: Some(llm_client), + memory_event_tx: None, + index_manager: None, } } + /// Set the memory event sender for access tracking 
(enables forgetting mechanism) + pub fn with_memory_event_tx(mut self, tx: mpsc::UnboundedSender) -> Self { + self.memory_event_tx = Some(tx); + self + } + + /// Set the memory index manager for archived-memory filtering + /// + /// When configured, search results whose corresponding `MemoryMetadata.archived == true` + /// will be removed from the result set, preventing stale/forgotten memories from + /// surfacing in semantic search. + pub fn with_index_manager(mut self, index_manager: Arc) -> Self { + self.index_manager = Some(index_manager); + self + } + + /// Filter out archived memories from a result list. + /// + /// Loads the index for each unique (scope, owner_id) combination found in the + /// results and removes any item whose memory ID is marked as archived. + /// Results whose URIs cannot be parsed are kept (conservative approach). + async fn filter_archived(&self, results: Vec) -> Vec { + let im = match &self.index_manager { + Some(im) => im, + None => return results, + }; + + // Build a cache of (scope, owner_id) → MemoryIndex to avoid repeated I/O + let mut index_cache: std::collections::HashMap< + (MemoryScope, String), + crate::memory_index::MemoryIndex, + > = std::collections::HashMap::new(); + + let total_before = results.len(); + let mut filtered = Vec::with_capacity(total_before); + + for result in results { + let keep = match Self::parse_scope_owner_from_uri(&result.uri) { + None => true, // Cannot parse URI → keep conservatively + Some((scope, owner_id, memory_id)) => { + let key = (scope.clone(), owner_id.clone()); + let index = if let Some(idx) = index_cache.get(&key) { + idx + } else { + match im.load_index(scope.clone(), owner_id.clone()).await { + Ok(idx) => { + index_cache.insert(key.clone(), idx); + index_cache.get(&key).unwrap() + } + Err(e) => { + warn!("Failed to load index for {}/{}: {}", scope, owner_id, e); + filtered.push(result); + continue; + } + } + }; + + !index + .memories + .get(&memory_id) + .map(|m| m.archived) + 
.unwrap_or(false) + } + }; + + if keep { + filtered.push(result); + } else { + debug!("Filtered archived memory: {}", result.uri); + } + } + + let archived_count = total_before - filtered.len(); + if archived_count > 0 { + info!( + "Filtered {}/{} archived memories from search results", + archived_count, total_before + ); + } + + filtered + } + /// + /// Extracts scope/owner from URI and sends events asynchronously. + /// Failures are logged but do not affect search results. + fn emit_access_events(&self, results: &[SearchResult], query: &str) { + let tx = match &self.memory_event_tx { + Some(tx) => tx, + None => return, + }; + + for result in results { + // Parse URI: cortex://{scope}/{owner_id}/... + if let Some(parsed) = Self::parse_scope_owner_from_uri(&result.uri) { + let (scope, owner_id, memory_id) = parsed; + let _ = tx.send(MemoryEvent::MemoryAccessed { + scope, + owner_id, + memory_id, + context: query.to_string(), + }); + } + } + } + + /// Parse scope, owner_id and memory_id from a cortex:// URI. + /// + /// URI format: `cortex://{scope}/{owner_id}/{type_dir}/{memory_file}.md` + /// + /// The returned `memory_id` is the **file name stem** of the last path segment + /// (e.g. `"pref_abc123"` from `"cortex://user/u1/preferences/pref_abc123.md"`). + /// + /// This matches `MemoryMetadata.id` because `IncrementalMemoryUpdater` generates + /// the ID first and then writes the file as `{id}.md`. The invariant is: + /// `MemoryMetadata.id == file_stem(MemoryMetadata.file)` + /// + /// If the URI cannot be parsed the caller should keep the result (conservative approach). 
+ fn parse_scope_owner_from_uri(uri: &str) -> Option<(MemoryScope, String, String)> { + let stripped = uri.strip_prefix("cortex://")?; + let parts: Vec<&str> = stripped.splitn(4, '/').collect(); + if parts.len() < 3 { + return None; + } + + let scope = match parts[0] { + "user" => MemoryScope::User, + "agent" => MemoryScope::Agent, + "session" => MemoryScope::Session, + "resources" => MemoryScope::Resources, + _ => return None, + }; + let owner_id = parts[1].to_string(); + // Use the file name stem as memory_id hint (e.g., "pref_abc123" from "preferences/pref_abc123.md") + let memory_id = if parts.len() == 4 { + parts[3] + .rsplit('/') + .next() + .unwrap_or(parts[3]) + .trim_end_matches(".md") + .to_string() + } else { + parts[2].trim_end_matches(".md").to_string() + }; + + Some((scope, owner_id, memory_id)) + } + /// Semantic search using vector similarity pub async fn semantic_search( &self, @@ -156,11 +315,16 @@ impl VectorSearchEngine { }); } + // 4. Filter archived memories (before emitting access events) + let results = self.filter_archived(results).await; + + // 5. Emit MemoryAccessed events (fire-and-forget for forgetting mechanism) + self.emit_access_events(&results, query); + Ok(results) } - /// Layered semantic search - utilizes L0/L1/L2 three-layer architecture - /// + /// Layered semantic search - utilizes L0/L1/L2 three-layer architecture /// /// Three-stage retrieval strategy: /// 1. Stage 1 (L0): Fast positioning using .abstract.md files /// 2. 
Stage 2 (L1): Deep exploration using .overview.md files @@ -396,10 +560,17 @@ impl VectorSearchEngine { final_results.sort_by(|a, b| b.score.partial_cmp(&a.score).unwrap()); final_results.truncate(options.limit); + // Filter out archived memories + let final_results = self.filter_archived(final_results).await; + info!( "Layered search completed: {} final results", final_results.len() ); + + // Emit MemoryAccessed events for final results (fire-and-forget) + self.emit_access_events(&final_results, &intent.original_query); + Ok(final_results) } diff --git a/cortex-mem-service/src/handlers/sessions.rs b/cortex-mem-service/src/handlers/sessions.rs index 10ab9d6..10d95ba 100644 --- a/cortex-mem-service/src/handlers/sessions.rs +++ b/cortex-mem-service/src/handlers/sessions.rs @@ -149,6 +149,20 @@ pub async fn close_session( let mut session_mgr = state.session_manager.write().await; let metadata = session_mgr.close_session(&thread_id).await?; + // Emit SessionClosed event to trigger full memory extraction pipeline. + // This mirrors what cortex-mem-tools does in close_session_sync. + if let Some(ref tx) = state.memory_event_tx { + // Load user_id / agent_id from metadata; fall back to "default" if not set. 
+ let user_id = metadata.user_id.clone().unwrap_or_else(|| "default".to_string()); + let agent_id = metadata.agent_id.clone().unwrap_or_else(|| "default".to_string()); + let _ = tx.send(cortex_mem_core::memory_events::MemoryEvent::SessionClosed { + session_id: thread_id.clone(), + user_id, + agent_id, + }); + tracing::info!("SessionClosed event emitted for thread {}", thread_id); + } + let response = SessionResponse { thread_id: metadata.thread_id, status: format!("{:?}", metadata.status), diff --git a/cortex-mem-service/src/state.rs b/cortex-mem-service/src/state.rs index e801aea..311dccd 100644 --- a/cortex-mem-service/src/state.rs +++ b/cortex-mem-service/src/state.rs @@ -1,6 +1,8 @@ use cortex_mem_core::{ CortexFilesystem, CortexMem, CortexMemBuilder, EmbeddingClient, - EmbeddingConfig, LLMClient, QdrantConfig, SessionManager, VectorSearchEngine, + EmbeddingConfig, LLMClient, MemoryIndexManager, QdrantConfig, + SessionManager, VectorSearchEngine, + memory_events::MemoryEvent, }; use std::path::PathBuf; use std::sync::Arc; @@ -27,6 +29,10 @@ pub struct AppState { pub current_tenant_root: Arc>>, /// Current tenant ID (for recreating tenant-specific vector store) pub current_tenant_id: Arc>>, + /// Memory event sender — used by close_session handler to trigger memory extraction. + /// Sending `MemoryEvent::SessionClosed` triggers the full extraction pipeline via + /// `MemoryEventCoordinator`. 
+ pub memory_event_tx: Option>, } impl AppState { @@ -74,36 +80,37 @@ impl AppState { let embedding_client = cortex.embedding(); let vector_store = cortex.vector_store(); - // Vector search engine由Cortex Memory管理,这里我们需要重新创建一个 - // 因为Cortex Memory内部没有暴露VectorSearchEngine - let vector_engine = if let (Some(_vs), Some(ec)) = (&vector_store, &embedding_client) { - // 需要downcast到具体类型QdrantVectorStore - // 由于vs是trait对象,这里重新从配置创建 - let (_, _, qdrant_cfg_opt) = Self::load_configs()?; - if let Some(qdrant_cfg) = qdrant_cfg_opt { - if let Ok(qdrant_store) = cortex_mem_core::QdrantVectorStore::new(&qdrant_cfg).await - { - let qdrant_arc = Arc::new(qdrant_store); - if let Some(llm) = &llm_client { - Some(Arc::new(VectorSearchEngine::with_llm( - qdrant_arc, - ec.clone(), - filesystem.clone(), - llm.clone(), - ))) - } else { - Some(Arc::new(VectorSearchEngine::new( - qdrant_arc, - ec.clone(), - filesystem.clone(), - ))) - } - } else { - None - } + let memory_event_tx = cortex.memory_event_tx(); + + // Vector search engine — reuse the Qdrant connection from CortexMem builder + // instead of creating a third connection from config. + let qdrant_store_typed = cortex.qdrant_store(); + let index_manager = Arc::new(MemoryIndexManager::new(filesystem.clone())); + + let vector_engine = if let (Some(qdrant_arc), Some(ec)) = + (qdrant_store_typed, &embedding_client) + { + let mut engine = if let Some(llm) = &llm_client { + VectorSearchEngine::with_llm( + qdrant_arc, + ec.clone(), + filesystem.clone(), + llm.clone(), + ) } else { - None + VectorSearchEngine::new( + qdrant_arc, + ec.clone(), + filesystem.clone(), + ) + }; + // Wire up forgetting-mechanism event tracking and archived-memory filter. + // Clone the sender so we can still store the original in AppState. 
+ if let Some(ref tx) = memory_event_tx { + engine = engine.with_memory_event_tx(tx.clone()); } + engine = engine.with_index_manager(index_manager.clone()); + Some(Arc::new(engine)) } else { None }; @@ -119,6 +126,7 @@ impl AppState { data_dir, current_tenant_root: Arc::new(RwLock::new(None)), current_tenant_id: Arc::new(RwLock::new(None)), + memory_event_tx, }) } @@ -321,12 +329,20 @@ impl AppState { tenant_root.to_string_lossy().as_ref(), )); - let new_vector_engine = Arc::new(VectorSearchEngine::with_llm( - qdrant_arc, - ec.clone(), - tenant_filesystem, - llm.clone(), - )); + let new_vector_engine = Arc::new( + VectorSearchEngine::with_llm( + qdrant_arc, + ec.clone(), + tenant_filesystem, + llm.clone(), + ) + // Re-wire forgetting-mechanism tracking. + // Note: no per-tenant memory_event_tx here (the coordinator is global); + // we reuse the global index_manager with the tenant filesystem scope. + .with_index_manager(Arc::new(MemoryIndexManager::new( + self.filesystem.clone(), + ))), + ); // Update vector_engine let mut engine = self.vector_engine.write().await; diff --git a/cortex-mem-tools/src/operations.rs b/cortex-mem-tools/src/operations.rs index dbc77e0..4856315 100644 --- a/cortex-mem-tools/src/operations.rs +++ b/cortex-mem-tools/src/operations.rs @@ -2,6 +2,7 @@ use crate::{errors::*, types::*}; use cortex_mem_core::{ CortexFilesystem, FilesystemOperations, + MemoryIndexManager, SessionConfig, SessionManager, automation::{ @@ -179,14 +180,25 @@ impl MemoryOperations { // LLM-enabled LayerManager for high-quality L0/L1 generation let layer_manager = Arc::new(LayerManager::new(filesystem.clone(), llm_client.clone())); - // Create vector search engine with LLM support for query rewriting - let vector_engine = Arc::new(VectorSearchEngine::with_llm( - vector_store.clone(), - embedding_client.clone(), - filesystem.clone(), - llm_client.clone(), - )); - tracing::info!("Vector search engine created with LLM support for query rewriting"); + // Create shared 
MemoryIndexManager (used by VectorSearchEngine for archived filtering + // and by MemoryCleanupService for forgetting curve evictions) + let index_manager = Arc::new(MemoryIndexManager::new(filesystem.clone())); + + // Create vector search engine with LLM support for query rewriting. + // Wire up: + // - memory_event_tx → search hits emit MemoryAccessed events (forgetting mechanism) + // - index_manager → archived memories are filtered from search results + let vector_engine = Arc::new( + VectorSearchEngine::with_llm( + vector_store.clone(), + embedding_client.clone(), + filesystem.clone(), + llm_client.clone(), + ) + .with_memory_event_tx(memory_event_tx.clone()) + .with_index_manager(index_manager.clone()), + ); + tracing::info!("Vector search engine created with LLM, event tracking, and archived filter"); // 使用传入的user_id,如果没有则使用tenant_id let actual_user_id = user_id.unwrap_or_else(|| tenant_id.clone()); @@ -206,13 +218,20 @@ impl MemoryOperations { // 创建 AutomationManager:仅负责 L2 消息实时索引 // L0/L1 生成、记忆提取、向量同步均由 MemoryEventCoordinator 处理 + // + // 使用 `with_memory_events` 路径,将 L2 索引请求路由到 MemoryEventCoordinator + // 而不是直接调用 AutoIndexer,实现统一调度和可观测性。 let automation_config = AutomationConfig { auto_index: true, index_on_message: true, // 消息添加时实时索引 L2 index_batch_delay: 1, max_concurrent_tasks: 3, }; - let automation_manager = AutomationManager::new(auto_indexer.clone(), automation_config); + let automation_manager = AutomationManager::with_memory_events( + auto_indexer.clone(), + automation_config, + memory_event_tx.clone(), + ); // 启动 AutomationManager(直接消费 EventBus 事件,无需分裂转发) let tenant_id_for_automation = tenant_id.clone(); @@ -274,6 +293,68 @@ impl MemoryOperations { } }); + // Build VectorSyncManager for MemoryCleanupService. + // The embedding client is required by the constructor but only used for Add/Update + // operations; Delete calls (the only ones cleanup makes) don't touch it. 
+ let vector_sync_for_cleanup = Arc::new( + cortex_mem_core::vector_sync_manager::VectorSyncManager::new( + filesystem.clone(), + embedding_client.clone(), + vector_store.clone(), + ), + ); + + // Launch background MemoryCleanupService (Ebbinghaus forgetting curve eviction). + // Runs every 24 hours; removes archived memories whose strength has decayed below + // the delete threshold and syncs deletions to Qdrant. + { + use cortex_mem_core::{ + memory_cleanup::{MemoryCleanupConfig, MemoryCleanupService}, + memory_index::MemoryScope, + }; + + let cleanup_svc = MemoryCleanupService::new( + index_manager.clone(), + MemoryCleanupConfig::default(), + Some(vector_sync_for_cleanup), + ); + let cleanup_user_id = actual_user_id.clone(); + + tokio::spawn(async move { + // Give the rest of the system time to finish initialising before + // the first cleanup sweep. + tokio::time::sleep(std::time::Duration::from_secs(60)).await; + tracing::info!("MemoryCleanupService background task started"); + + let interval = std::time::Duration::from_secs(24 * 60 * 60); + loop { + // Clean both User-scoped memories (preferences, entities, …) + // and Agent-scoped memories (cases, …). 
+ match cleanup_svc + .run_cleanup(&MemoryScope::User, &cleanup_user_id) + .await + { + Ok(stats) => tracing::info!( + "MemoryCleanup[User]: scanned={}, archived={}, deleted={}", + stats.total_scanned, stats.archived, stats.deleted + ), + Err(e) => tracing::warn!("MemoryCleanup[User] failed: {}", e), + } + match cleanup_svc + .run_cleanup(&MemoryScope::Agent, &cleanup_user_id) + .await + { + Ok(stats) => tracing::info!( + "MemoryCleanup[Agent]: scanned={}, archived={}, deleted={}", + stats.total_scanned, stats.archived, stats.deleted + ), + Err(e) => tracing::warn!("MemoryCleanup[Agent] failed: {}", e), + } + tokio::time::sleep(interval).await; + } + }); + } + Ok(Self { filesystem, session_manager, diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index 58c0a0d..e53d1b6 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -592,8 +592,20 @@ impl App { } } - if self.infrastructure.is_none() { - log::warn!("Agent 未初始化"); + // 如果 rig_agent 为 None(初始化失败),给用户明确的 UI 提示 + if self.rig_agent.is_none() { + log::warn!("Agent 未初始化,无法处理用户消息"); + // 只推送一次,避免每次发消息都重复显示 + let already_notified = self.ui.messages.iter().any(|m| { + m.role == crate::agent::MessageRole::System + && m.content.contains("基础设施初始化失败") + }); + if !already_notified { + self.ui.messages.push(ChatMessage::system( + "⚠️ Agent 尚未初始化。请检查日志或按照上面的提示解决配置问题后重新选择机器人。", + )); + self.ui.auto_scroll = true; + } } // 滚动到底部 - 将在渲染时自动计算 @@ -814,11 +826,27 @@ impl App { } Err(e) => { log::error!("❌ 初始化 AI Agent 失败: {}", e); + self.ui.messages.push(ChatMessage::system(format!( + "❌ AI Agent 初始化失败:{}\n\n请检查配置文件中的 [llm] / [qdrant] / [embedding] 配置项是否正确。", + e + ))); + self.ui.auto_scroll = true; return; } } } else { log::error!("❌ 基础设施未初始化,无法创建 Agent"); + // 向 UI 推送详细的错误提示,让用户知道原因和解决方法 + let config_path = directories::ProjectDirs::from("com", "cortex-mem", "tars") + .map(|d| d.data_dir().join("config.toml").to_string_lossy().to_string()) + 
.unwrap_or_else(|| "~/Library/Application Support/com.cortex-mem.tars/config.toml".to_string()); + self.ui.messages.push(ChatMessage::system( + format!( + "❌ 基础设施初始化失败,Agent 无法启动。\n\n可能的原因:\n 1. Qdrant 向量数据库未运行(默认地址: http://localhost:6334)\n → 请先启动 Qdrant:docker run -p 6333:6333 -p 6334:6334 qdrant/qdrant\n 2. 配置文件中 API Key / URL 不正确\n → 请检查配置文件: {}\n\n配置文件检查项:\n [qdrant] url / api_key\n [llm] api_base_url / api_key\n [embedding] api_base_url / api_key", + config_path + ) + )); + self.ui.auto_scroll = true; return; } } diff --git a/examples/cortex-mem-tars/src/main.rs b/examples/cortex-mem-tars/src/main.rs index 564a4c3..4db43c4 100644 --- a/examples/cortex-mem-tars/src/main.rs +++ b/examples/cortex-mem-tars/src/main.rs @@ -47,7 +47,11 @@ async fn main() -> Result<()> { Some(Arc::new(inf)) } Err(e) => { - log::warn!("基础设施初始化失败,将使用 Mock Agent: {}", e); + log::error!("基础设施初始化失败: {:#}", e); + log::warn!( + "常见原因: Qdrant 未启动(请运行 docker run -p 6333:6333 -p 6334:6334 qdrant/qdrant)\ + 或 config.toml 中 API 配置不正确" + ); None } }; From 9bb5b3f7a98b3c89e2f6358e1cf9d316d236b36f Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 14:24:01 +0800 Subject: [PATCH 15/20] Force L0/L1 layer generation after session close and add critical language rules for memory extraction Ensure L0/L1 files are generated synchronously after memory extraction in on_session_closed to prevent missed updates on process exit. Also add critical language rules to preserve original language and technical terms in memory extraction. 
--- .../src/memory_event_coordinator.rs | 53 +++++++++++++++++++ examples/cortex-mem-tars/src/app.rs | 22 +++++--- 2 files changed, 69 insertions(+), 6 deletions(-) diff --git a/cortex-mem-core/src/memory_event_coordinator.rs b/cortex-mem-core/src/memory_event_coordinator.rs index abc9fec..03bb9fb 100644 --- a/cortex-mem-core/src/memory_event_coordinator.rs +++ b/cortex-mem-core/src/memory_event_coordinator.rs @@ -718,6 +718,34 @@ impl MemoryEventCoordinator { "User memory updated for session {}: {} created, {} updated", session_id, user_result.created, user_result.updated ); + + // 2b. Synchronously generate L0/L1 for user and agent directories. + // + // `update_memories` writes files and emits MemoryCreated/MemoryUpdated events via + // `event_tx`, but those events are handled by the background loop *asynchronously*. + // Since `on_session_closed` is called synchronously (without waiting for the loop), + // the background handler hasn't run yet — so L0/L1 files would not be generated + // until the next background iteration, which may happen after the process exits. + // + // Fix: call `layer_updater.update_all_layers` directly here so that L0/L1 is + // generated before we return. + info!("Generating L0/L1 for user/{} after memory extraction...", user_id); + if let Err(e) = self.layer_updater + .update_all_layers(&crate::memory_index::MemoryScope::User, user_id) + .await + { + warn!("Failed to update user L0/L1 layers: {}", e); + } + + if !extracted.cases.is_empty() { + info!("Generating L0/L1 for agent/{} after case memory extraction...", agent_id); + if let Err(e) = self.layer_updater + .update_all_layers(&crate::memory_index::MemoryScope::Agent, agent_id) + .await + { + warn!("Failed to update agent L0/L1 layers: {}", e); + } + } } else { info!("No memories extracted from session {}", session_id); } @@ -879,6 +907,31 @@ impl MemoryEventCoordinator { format!( r#"Analyze the following conversation and extract memories in JSON format. 
+## CRITICAL LANGUAGE RULES + +1. **Language Consistency** (MANDATORY): + - Extract memories in the SAME language as the conversation + - If conversation is in Chinese (中文) → memories MUST be in Chinese + - If conversation is in English → memories in English + - If mixed language → use the dominant language (>60% of content) + - **DO NOT translate** the conversation content into another language + +2. **Preserve Technical Terms** (MANDATORY): + - Keep technical terminology unchanged in their original language + - Programming languages: Rust, Python, TypeScript, JavaScript, Go + - Frameworks: Cortex Memory, Rig, React, Vue + - Personality types: INTJ, ENTJ, MBTI, DISC + - Proper nouns: names, companies, projects + - Acronyms: LLM, AI, ML, API, HTTP, REST + +3. **Examples**: + ✅ CORRECT (Chinese conversation): + - "Cortex Memory 是基于 Rust 的长期记忆系统" + - "用户喜欢吃牛肉汉堡,搭配酸黄瓜、芝士和可乐" + + ❌ WRONG (Chinese conversation, should NOT translate to English): + - "User likes beef burgers with pickles, cheese, and coke" + ## Instructions Extract the following types of memories: diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index e53d1b6..ebdcbc8 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -1097,12 +1097,22 @@ impl App { { // 同步关闭会话:等待记忆提取 + user/agent 文件写入 + L0/L1 生成全部完成后才返回 log::info!("🧠 同步关闭会话,等待记忆提取与层级文件生成完成..."); - match tenant_ops.close_session_sync(session_id).await { - Ok(_) => { - log::info!("✅ 会话已关闭,记忆提取与 L0/L1 生成完成"); - } - Err(e) => { - log::warn!("⚠️ 会话关闭失败: {}", e); + + // 先检查 session 是否实际存在(若用户未发送任何消息,session 可能从未被创建) + let session_exists = tenant_ops.exists(&format!("cortex://session/{}/.session.json", session_id)) + .await + .unwrap_or(false); + + if !session_exists { + log::info!("ℹ️ 会话 {} 未创建过(本次无消息),跳过关闭流程", session_id); + } else { + match tenant_ops.close_session_sync(session_id).await { + Ok(_) => { + log::info!("✅ 会话已关闭,记忆提取与 L0/L1 生成完成"); + } + Err(e) => { + log::warn!("⚠️ 
会话关闭失败: {}", e); + } } } From 30f7ad5ac8179bb891fcb9638bc58425001eb825 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 16:44:35 +0800 Subject: [PATCH 16/20] Replace env_logger with tracing ecosystem Migrate from env_logger to tracing for better structured logging support. The new setup uses tracing-subscriber with a custom FileLayer that writes logs to both file and memory buffer, while maintaining backward compatibility through tracing-log bridge for existing log:: macros. --- Cargo.lock | 64 +--------- examples/cortex-mem-tars/Cargo.toml | 2 +- examples/cortex-mem-tars/src/agent.rs | 81 +++++++----- examples/cortex-mem-tars/src/app.rs | 18 ++- examples/cortex-mem-tars/src/logger.rs | 165 +++++++++++++++++-------- 5 files changed, 176 insertions(+), 154 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a0bd48..5b4c3c0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -737,7 +737,6 @@ dependencies = [ "cpal", "crossterm", "directories 6.0.0", - "env_logger", "futures", "libc", "log", @@ -752,6 +751,7 @@ dependencies = [ "tokio", "toml", "tracing", + "tracing-log", "tracing-subscriber", "tui-markdown", "tui-textarea", @@ -1097,29 +1097,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "env_filter" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bf3c259d255ca70051b30e2e95b5446cdb8949ac4cd22c0d7fd634d89f568e2" -dependencies = [ - "log", - "regex", -] - -[[package]] -name = "env_logger" -version = "0.11.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c863f0904021b108aa8b2f55046443e6b1ebde8fd4a15c399893aae4fa069f" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "jiff", - "log", -] - [[package]] name = "equivalent" version = "1.0.2" @@ -1787,30 +1764,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" -[[package]] -name = "jiff" -version = 
"0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67e8da4c49d6d9909fe03361f9b620f58898859f5c7aded68351e85e71ecf50" -dependencies = [ - "jiff-static", - "log", - "portable-atomic", - "portable-atomic-util", - "serde_core", -] - -[[package]] -name = "jiff-static" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c84ee7f197eca9a86c6fd6cb771e55eb991632f15f2bc3ca6ec838929e6e78" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "jni" version = "0.21.1" @@ -2499,21 +2452,6 @@ dependencies = [ "time", ] -[[package]] -name = "portable-atomic" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" - -[[package]] -name = "portable-atomic-util" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" -dependencies = [ - "portable-atomic", -] - [[package]] name = "potential_utf" version = "0.1.4" diff --git a/examples/cortex-mem-tars/Cargo.toml b/examples/cortex-mem-tars/Cargo.toml index 2d6d267..af83633 100644 --- a/examples/cortex-mem-tars/Cargo.toml +++ b/examples/cortex-mem-tars/Cargo.toml @@ -41,9 +41,9 @@ directories = "6.0" # Logging log = "0.4" -env_logger = "0.11" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } +tracing-log = "0.2" once_cell = "1.21" # Async diff --git a/examples/cortex-mem-tars/src/agent.rs b/examples/cortex-mem-tars/src/agent.rs index 0ce2321..780d2b9 100644 --- a/examples/cortex-mem-tars/src/agent.rs +++ b/examples/cortex-mem-tars/src/agent.rs @@ -98,14 +98,61 @@ pub async fn create_memory_agent( // 获取租户 operations 用于外部使用 let tenant_operations = memory_tools.operations().clone(); - // 创建 Rig LLM 客户端用于 Agent 对话 + // 使用共享的 rebuild_rig_agent 构建 Agent(复用已有 MemoryOperations) + let agent = 
rebuild_rig_agent(config, tenant_operations.clone(), user_info, bot_system_prompt, agent_id)?; + + Ok((agent, tenant_operations)) +} + +/// 在已有 MemoryOperations 上重建 Rig Agent(更新 system prompt) +/// +/// 当 user_info 或 bot_system_prompt 发生变化时,只需重新构建顶层的 +/// Rig Agent,而无需重建底层 MemoryOperations(Qdrant 连接、Embedding +/// 客户端、MemoryEventCoordinator 等全部复用),避免重复初始化基础设施。 +pub fn rebuild_rig_agent( + config: &cortex_mem_config::Config, + tenant_operations: Arc, + user_info: Option<&str>, + bot_system_prompt: Option<&str>, + agent_id: &str, +) -> Result, Box> { + // 用已有的 MemoryOperations 构建 MemoryTools(轻量包装,无 IO) + let memory_tools = cortex_mem_rig::MemoryTools::new(tenant_operations); + + // 构建 system prompt + let base_system_prompt = build_system_prompt(user_info, agent_id); + let system_prompt = if let Some(bot_prompt) = bot_system_prompt { + format!("{}\n\n你的角色设定:\n{}", base_system_prompt, bot_prompt) + } else { + base_system_prompt + }; + + // 创建 Rig LLM 客户端(仅用于对话,轻量级,无网络连接建立) let llm_client = Client::builder() .api_key(&config.llm.api_key) .base_url(&config.llm.api_base_url) .build()?; - // 构建 system prompt - let base_system_prompt = if let Some(info) = user_info { + use rig::client::CompletionClient; + let agent = llm_client + .completions_api() + .agent(&config.llm.model_efficient) + .preamble(&system_prompt) + .default_max_turns(30) + .tool(memory_tools.search_tool()) + .tool(memory_tools.find_tool()) + .tool(memory_tools.abstract_tool()) + .tool(memory_tools.overview_tool()) + .tool(memory_tools.read_tool()) + .tool(memory_tools.ls_tool()) + .build(); + + Ok(agent) +} + +/// 构建 system prompt(从 create_memory_agent 中提取的共享逻辑) +fn build_system_prompt(user_info: Option<&str>, agent_id: &str) -> String { + if let Some(info) = user_info { format!( r#"你是一个拥有持久分层记忆能力的智能 AI 助手(TARS)。 @@ -450,33 +497,7 @@ URI 来自 ls 或 search 结果中的具体文件路径,例如: current_time = chrono::Local::now().format("%Y年%m月%d日 %H:%M:%S"), agent_id = agent_id ) - }; - - // 追加机器人系统提示词 - let system_prompt = if let 
Some(bot_prompt) = bot_system_prompt { - format!("{}\n\n你的角色设定:\n{}", base_system_prompt, bot_prompt) - } else { - base_system_prompt - }; - - use rig::client::CompletionClient; - let completion_model = llm_client - .completions_api() // Use completions API to get CompletionModel - .agent(&config.llm.model_efficient) - .preamble(&system_prompt) - .default_max_turns(30) // 🔧 设置默认max_turns为30,避免频繁触发MaxTurnError - // 搜索工具(最常用) - .tool(memory_tools.search_tool()) - .tool(memory_tools.find_tool()) - // 分层访问工具 - .tool(memory_tools.abstract_tool()) - .tool(memory_tools.overview_tool()) - .tool(memory_tools.read_tool()) - // 文件系统工具 - .tool(memory_tools.ls_tool()) - .build(); - - Ok((completion_model, tenant_operations)) + } } /// 从记忆中提取用户基本信息 diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index ebdcbc8..00ad28b 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -1,4 +1,4 @@ -use crate::agent::{AgentChatHandler, ChatMessage, create_memory_agent, extract_user_basic_info}; +use crate::agent::{AgentChatHandler, ChatMessage, create_memory_agent, extract_user_basic_info, rebuild_rig_agent}; use crate::config::{BotConfig, ConfigManager}; use crate::infrastructure::Infrastructure; use crate::logger::LogManager; @@ -797,25 +797,23 @@ impl App { } }; - // 如果有用户信息,需要重新创建带用户信息的 Agent + // 如果有用户信息,用 rebuild_rig_agent 重建 Agent(复用已有 + // MemoryOperations,不重建 Qdrant/Embedding 等底层基础设施) if user_info.is_some() { let config = infrastructure.config(); - match create_memory_agent( - config.cortex.data_dir(), + match rebuild_rig_agent( config, + tenant_ops.clone(), user_info.as_deref(), Some(bot.system_prompt.as_str()), &bot.id, - &self.user_id, - ) - .await - { - Ok((agent_with_userinfo, _)) => { + ) { + Ok(agent_with_userinfo) => { self.rig_agent = Some(agent_with_userinfo); log::info!("✅ 已创建带用户信息的 Agent"); } Err(e) => { - log::error!("重新创建带用户信息的 Agent 失败: {}", e); + log::error!("重建带用户信息的 Agent 失败: {}", e); 
self.rig_agent = Some(rig_agent); } } diff --git a/examples/cortex-mem-tars/src/logger.rs b/examples/cortex-mem-tars/src/logger.rs index acd235b..47c7ff5 100644 --- a/examples/cortex-mem-tars/src/logger.rs +++ b/examples/cortex-mem-tars/src/logger.rs @@ -1,11 +1,12 @@ use anyhow::{Context, Result}; -use log::{Level, LevelFilter, Metadata, Record}; use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; +use tracing::{Level, Subscriber}; +use tracing_subscriber::{Layer, layer::SubscriberExt, util::SubscriberInitExt}; -/// 日志管理器 +/// 日志管理器(保持向后兼容的接口) pub struct LogManager { #[allow(dead_code)] log_file: PathBuf, @@ -23,7 +24,7 @@ impl LogManager { std::fs::create_dir_all(parent).context("无法创建日志目录")?; } - // 打开或创建日志文件 + // 打开或创建日志文件(追加模式) let file = OpenOptions::new() .create(true) .append(true) @@ -37,35 +38,13 @@ impl LogManager { }) } - /// 写入日志 - pub fn write(&self, level: Level, message: &str) -> Result<()> { - let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S%.3f"); - let log_line = format!("[{} {}] {}", timestamp, level, message); - - // 写入文件 - let mut file = self.file.lock().map_err(|e| anyhow::anyhow!("无法获取文件锁: {}", e))?; - writeln!(file, "{}", log_line) - .context("无法写入日志")?; - file.flush().context("无法刷新日志")?; - - // 添加到内存中的日志行 - let mut lines = self.lines.lock().map_err(|e| anyhow::anyhow!("无法获取日志行锁: {}", e))?; - lines.push(log_line.clone()); - - // 限制内存中的日志行数 - if lines.len() > 1000 { - let excess = lines.len() - 1000; - lines.drain(0..excess); - } - - Ok(()) - } - - /// 读取日志内容 + /// 读取内存中的最近日志 pub fn read_logs(&self, max_lines: usize) -> Result> { - let lines = self.lines.lock().map_err(|e| anyhow::anyhow!("无法获取日志行锁: {}", e))?; + let lines = self + .lines + .lock() + .map_err(|e| anyhow::anyhow!("无法获取日志行锁: {}", e))?; - // 返回最后 max_lines 行 if lines.len() > max_lines { Ok(lines[lines.len() - max_lines..].to_vec()) } else { @@ -74,44 +53,130 @@ impl LogManager { } } -/// 自定义 Logger 
-struct SimpleLogger { +/// 自定义 tracing Layer,把日志同时写入文件和内存缓冲 +struct FileLayer { manager: Arc, } -impl log::Log for SimpleLogger { - fn enabled(&self, metadata: &Metadata) -> bool { - // 🔧 过滤TRACE和DEBUG日志,只保留INFO及以上级别 - metadata.level() <= Level::Info +impl tracing_subscriber::registry::LookupSpan<'a>> Layer + for FileLayer +{ + fn on_event( + &self, + event: &tracing::Event<'_>, + _ctx: tracing_subscriber::layer::Context<'_, S>, + ) { + let meta = event.metadata(); + let level = meta.level(); + + // INFO 级别以上才写入(过滤 DEBUG/TRACE 减少噪音) + if *level > Level::INFO { + return; + } + + // 提取消息和来自 log-bridge 的额外字段 + let mut visitor = FullVisitor::default(); + event.record(&mut visitor); + + // target 优先级: + // 1. 若是 tracing-log bridge 转来的事件,target 就是 `"log"`, + // 此时使用 bridge 附加的 `log.module_path` 字段作为真实来源 + // 2. 否则直接使用 metadata 中的 target(即 tracing 宏调用点的模块路径) + let target = if meta.target() == "log" { + visitor + .log_module_path + .as_deref() + .unwrap_or("tars") + .to_string() + } else { + meta.target().to_string() + }; + + let timestamp = chrono::Local::now().format("%Y-%m-%d %H:%M:%S%.3f"); + let log_line = format!( + "[{} {}] [{}] {}", + timestamp, level, target, visitor.message + ); + + // 写入文件(静默失败,不干扰 TUI) + if let Ok(mut file) = self.manager.file.lock() { + let _ = writeln!(file, "{}", log_line); + let _ = file.flush(); + } + + // 写入内存缓冲 + if let Ok(mut lines) = self.manager.lines.lock() { + lines.push(log_line); + if lines.len() > 1000 { + let excess = lines.len() - 1000; + lines.drain(0..excess); + } + } } +} + +/// 从 tracing Event 中提取所有关心的字段: +/// - `message` — 日志正文 +/// - `log.module_path` — tracing-log bridge 附加的来源模块路径 +/// - `log.target` — tracing-log bridge 附加的原始 target(备用) +#[derive(Default)] +struct FullVisitor { + message: String, + log_module_path: Option, +} - fn log(&self, record: &Record) { - if self.enabled(record.metadata()) { - let message = format!("{}", record.args()); - // 🔇 静默处理日志写入失败,避免干扰TUI - let _ = self.manager.write(record.level(), 
&message); +impl tracing::field::Visit for FullVisitor { + fn record_str(&mut self, field: &tracing::field::Field, value: &str) { + match field.name() { + "message" => self.message.push_str(value), + "log.module_path" => self.log_module_path = Some(value.to_string()), + _ => {} } } - fn flush(&self) {} + fn record_debug(&mut self, field: &tracing::field::Field, value: &dyn std::fmt::Debug) { + if field.name() == "message" { + use std::fmt::Write; + let _ = write!(self.message, "{:?}", value); + } + } } /// 初始化日志系统 +/// +/// 统一用 `tracing` 生态作为日志后端: +/// - `cortex-mem-core` 等子 crate 使用 `tracing::info!` → 直接被 subscriber 捕获 +/// - tars 自身使用 `log::info!` → 通过 `tracing_log::LogTracer` 转发给 tracing +/// +/// 所有日志都写入同一个 `app.log` 文件,并在内存中保留最近 1000 条供查阅。 +/// +/// 可通过环境变量 `RUST_LOG` 覆盖默认级别(例如 `RUST_LOG=debug` 开启详细输出)。 pub fn init_logger(log_dir: &Path) -> Result> { let manager = Arc::new(LogManager::new(log_dir)?); - // 创建自定义 logger - let logger = SimpleLogger { + // 桥接 `log` → tracing:让 log::info! / log::warn! 
等也被 tracing 捕获。 + // with_max_level 设为 INFO,过滤掉 log crate 产生的 DEBUG/TRACE 事件。 + tracing_log::LogTracer::builder() + .with_max_level(log::LevelFilter::Info) + .init() + .ok(); // 若已初始化则忽略(防止测试环境重复 init 报错) + + let file_layer = FileLayer { manager: Arc::clone(&manager), }; - // 设置全局 logger - log::set_logger(Box::leak(Box::new(logger))) - .map_err(|e| anyhow::anyhow!("无法设置 logger: {}", e))?; - log::set_max_level(LevelFilter::Info); + // 环境变量 RUST_LOG 可覆盖默认级别 + let env_filter = tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")); + + tracing_subscriber::registry() + .with(env_filter) + .with(file_layer) + .try_init() + .ok(); // 若已初始化则忽略 - log::info!("日志系统初始化完成"); - log::info!("日志文件路径: {}", log_dir.display()); + tracing::info!("日志系统初始化完成"); + tracing::info!("日志文件路径: {}", log_dir.display()); Ok(manager) } From 9c501c5952df8ded5d930ac59531d5a280b505a5 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 17:41:10 +0800 Subject: [PATCH 17/20] Bump version to 2.7.0 and optimize timeline layer generation Skip timeline layer generation when L0/L1 files already exist to prevent redundant operations. Also optimize vector database indexing to only process current session files instead of all historical data. 
--- Cargo.lock | 16 +++++------ Cargo.toml | 2 +- cortex-mem-core/src/automation/sync.rs | 38 +++++++++++++++++++++++--- examples/cortex-mem-tars/src/app.rs | 14 +++++++--- 4 files changed, 53 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5b4c3c0..af4c284 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -600,7 +600,7 @@ dependencies = [ [[package]] name = "cortex-mem-cli" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "assert_cmd", @@ -621,7 +621,7 @@ dependencies = [ [[package]] name = "cortex-mem-config" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "directories 5.0.1", @@ -632,7 +632,7 @@ dependencies = [ [[package]] name = "cortex-mem-core" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "async-trait", @@ -660,7 +660,7 @@ dependencies = [ [[package]] name = "cortex-mem-mcp" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "async-trait", @@ -683,7 +683,7 @@ dependencies = [ [[package]] name = "cortex-mem-rig" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "async-trait", @@ -699,7 +699,7 @@ dependencies = [ [[package]] name = "cortex-mem-service" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "axum", @@ -722,7 +722,7 @@ dependencies = [ [[package]] name = "cortex-mem-tars" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "async-stream", @@ -762,7 +762,7 @@ dependencies = [ [[package]] name = "cortex-mem-tools" -version = "2.5.1" +version = "2.7.0" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 93176a4..f6d57aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,7 @@ members = [ ] [workspace.package] -version = "2.5.1" +version = "2.7.0" edition = "2024" rust-version = "1.86" authors = ["Sopaco"] diff --git a/cortex-mem-core/src/automation/sync.rs b/cortex-mem-core/src/automation/sync.rs index c26d811..8a89e5e 100644 --- a/cortex-mem-core/src/automation/sync.rs +++ 
b/cortex-mem-core/src/automation/sync.rs @@ -249,13 +249,43 @@ impl SyncManager { let mut stats = SyncStats::default(); // ✅ Generate timeline layers ONLY at session root level (not subdirectories) - // This prevents overwriting session-level summaries with day-level summaries + // This prevents overwriting session-level summaries with day-level summaries. + // + // Skip if BOTH L0 (.abstract.md) and L1 (.overview.md) already exist. + // + // Design rationale: + // - The authoritative L0/L1 generator for a session is + // `CascadeLayerUpdater::update_timeline_layers`, called inside + // `MemoryEventCoordinator::on_session_closed` (via `close_session_sync`). + // That path runs on every session close and uses content-hash change + // detection to avoid redundant LLM calls. + // - This code path (`SyncManager`) is only a fallback for the vector-index + // pass at exit time. Historical sessions that are already fully closed + // have stable L0/L1 files that don't need regeneration. + // - The current session's L0/L1 are guaranteed to be generated by the + // `close_session_sync` call that precedes this sync pass; so the + // exist-check skip is safe here too. 
let is_session_timeline_root = uri.ends_with("/timeline") && !uri.contains("/timeline/"); if is_session_timeline_root { - if let Err(e) = self.generate_timeline_layers(uri).await { - warn!("Failed to generate timeline layers for {}: {}", uri, e); + let l0_exists = self + .filesystem + .exists(&format!("{}/.abstract.md", uri)) + .await + .unwrap_or(false); + let l1_exists = self + .filesystem + .exists(&format!("{}/.overview.md", uri)) + .await + .unwrap_or(false); + + if l0_exists && l1_exists { + debug!("Timeline layers already exist for {}, skipping generation", uri); } else { - info!("Generated session-level timeline layers for {}", uri); + if let Err(e) = self.generate_timeline_layers(uri).await { + warn!("Failed to generate timeline layers for {}: {}", uri, e); + } else { + info!("Generated session-level timeline layers for {}", uri); + } } } diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index 00ad28b..6a0c489 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -1114,10 +1114,16 @@ impl App { } } - // 索引所有文件到向量数据库(此时 L0/L1 已全部生成,索引数据完整) - log::info!("📊 开始索引所有文件到向量数据库..."); - let index_timeout = tokio::time::Duration::from_secs(360); - match tokio::time::timeout(index_timeout, tenant_ops.index_all_files()).await { + // 只索引当前会话新产生的文件(L0/L1/L2),不全量扫描所有历史 session + // 全量扫描会对每个历史 session 重新生成 timeline L0/L1,耗时极长 + log::info!("📊 开始索引当前会话文件到向量数据库..."); + let index_timeout = tokio::time::Duration::from_secs(120); + match tokio::time::timeout( + index_timeout, + tenant_ops.index_session_files(session_id), + ) + .await + { Ok(Ok(stats)) => { log::info!( "✅ 索引完成: 总计 {} 个文件, {} 个已索引, {} 个跳过", From 435343f36c4af581ad655a234a2b40a062429f4c Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 21:22:35 +0800 Subject: [PATCH 18/20] Add hash-based content change detection and metadata stripping to cascade layer updates --- cortex-mem-core/src/cascade_layer_updater.rs | 133 
++++++++++++------- examples/cortex-mem-tars/src/app.rs | 2 +- 2 files changed, 88 insertions(+), 47 deletions(-) diff --git a/cortex-mem-core/src/cascade_layer_updater.rs b/cortex-mem-core/src/cascade_layer_updater.rs index c74d4ec..3fb7f1d 100644 --- a/cortex-mem-core/src/cascade_layer_updater.rs +++ b/cortex-mem-core/src/cascade_layer_updater.rs @@ -128,28 +128,46 @@ impl CascadeLayerUpdater { content.hash(&mut hasher); format!("{:x}", hasher.finish()) } + + /// Strip metadata lines added by this module so they don't pollute + /// content aggregation used for hash comparison and parent-level summaries. + /// + /// Stripped lines: + /// - `` — source hash footer + /// - `**Added**: ...` — timestamp footer + fn strip_metadata_lines(content: &str) -> String { + content + .lines() + .filter(|line| { + !line.starts_with("` + /// This records the hash of the *source* content that was fed to the LLM, + /// not the hash of the generated summary text itself. async fn should_update_layer(&self, layer_uri: &str, new_content_hash: &str) -> Result { - // Try to read existing layer file match self.filesystem.read(layer_uri).await { Ok(existing_content) => { - // Calculate hash of existing content (excluding timestamp) - // Remove timestamp line for comparison - let content_without_ts = existing_content - .lines() - .filter(|line| !line.starts_with("**Added**:")) - .collect::>() - .join("\n"); - - let old_hash = self.calculate_content_hash(&content_without_ts); - - // Only update if content changed - Ok(old_hash != new_content_hash) + // Look for stored source-hash comment in the file + for line in existing_content.lines() { + if let Some(rest) = line.strip_prefix("") { + return Ok(stored_hash != new_content_hash); + } + } + } + // No hash found in old file (legacy format) → regenerate + Ok(true) } Err(_) => { // File doesn't exist, need to create @@ -284,10 +302,10 @@ impl CascadeLayerUpdater { stats.updated_count += 1; } - // Add timestamp + // Add timestamp + source hash 
footer (used by should_update_layer) let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); - let abstract_with_ts = format!("{}\n\n**Added**: {}", abstract_text, timestamp); - let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}", overview, timestamp); + let abstract_with_ts = format!("{}\n\n**Added**: {}\n", abstract_text, timestamp, new_content_hash); + let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}\n", overview, timestamp, new_content_hash); // Write layer files let overview_uri = format!("{}/.overview.md", dir_uri); @@ -441,10 +459,10 @@ impl CascadeLayerUpdater { stats.updated_count += 1; } - // Add timestamp + // Add timestamp + source hash footer (used by should_update_layer) let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); - let abstract_with_ts = format!("{}\n\n**Added**: {}", abstract_text, timestamp); - let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}", overview, timestamp); + let abstract_with_ts = format!("{}\n\n**Added**: {}\n", abstract_text, timestamp, new_content_hash); + let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}\n", overview, timestamp, new_content_hash); // Write layer files let overview_uri = format!("{}/.overview.md", root_uri); @@ -486,7 +504,9 @@ impl CascadeLayerUpdater { match self.filesystem.read(&entry.uri).await { Ok(file_content) => { content.push_str(&format!("\n\n=== {} ===\n\n", entry.name)); - content.push_str(&file_content); + // Strip source-hash footer so it doesn't pollute parent-level aggregation + let stripped = Self::strip_metadata_lines(&file_content); + content.push_str(&stripped); file_count += 1; } Err(e) => { @@ -527,7 +547,9 @@ impl CascadeLayerUpdater { let abstract_uri = format!("{}/.abstract.md", entry.uri); if let Ok(abstract_content) = self.filesystem.read(&abstract_uri).await { content.push_str(&format!("\n\n## {}\n\n", entry.name)); - content.push_str(&abstract_content); + // Strip source-hash footer so it doesn't pollute parent-level 
aggregation + let stripped = Self::strip_metadata_lines(&abstract_content); + content.push_str(&stripped); dir_count += 1; } } @@ -584,6 +606,16 @@ impl CascadeLayerUpdater { return Ok(()); } + // 🔧 Hash check: skip if content hasn't changed since last generation + let abstract_uri = format!("{}/.abstract.md", timeline_uri); + let content_hash = self.calculate_content_hash(&content); + if !self.should_update_layer(&abstract_uri, &content_hash).await? { + debug!("⏭️ Skipped timeline L0/L1 for session {} (content unchanged)", session_id); + // Still update date-level layers (they have their own hash checks) + self.update_timeline_date_layers(&timeline_uri).await?; + return Ok(()); + } + // Generate L0 abstract let abstract_text = self.l0_generator .generate_with_llm(&content, &self.llm_client, &[]) @@ -594,13 +626,12 @@ impl CascadeLayerUpdater { .generate_with_llm(&content, &self.llm_client) .await?; - // Add timestamp + // Add timestamp + source hash footer (used by should_update_layer for change detection) let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); - let abstract_with_ts = format!("{}\n\n**Added**: {}", abstract_text, timestamp); - let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}", overview, timestamp); + let abstract_with_ts = format!("{}\n\n**Added**: {}\n", abstract_text, timestamp, content_hash); + let overview_with_ts = format!("{}\n\n---\n\n**Added**: {}\n", overview, timestamp, content_hash); // Write layer files - let abstract_uri = format!("{}/.abstract.md", timeline_uri); let overview_uri = format!("{}/.overview.md", timeline_uri); self.filesystem.write(&abstract_uri, &abstract_with_ts).await?; @@ -697,17 +728,22 @@ impl CascadeLayerUpdater { let month_content = self.aggregate_directory_content_recursive(&entry.uri).await?; if !month_content.is_empty() { - let abstract_text = self.l0_generator - .generate_with_llm(&month_content, &self.llm_client, &[]) - .await?; - - let timestamp = chrono::Utc::now().format("%Y-%m-%d 
%H:%M:%S UTC"); - let abstract_with_ts = format!("{}\n\n**Added**: {}", abstract_text, timestamp); - let abstract_uri = format!("{}/.abstract.md", entry.uri); - self.filesystem.write(&abstract_uri, &abstract_with_ts).await?; - - debug!("Updated month-level L0 for {}", entry.uri); + let content_hash = self.calculate_content_hash(&month_content); + // Skip if content hasn't changed + if self.should_update_layer(&abstract_uri, &content_hash).await? { + let abstract_text = self.l0_generator + .generate_with_llm(&month_content, &self.llm_client, &[]) + .await?; + + let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); + let abstract_with_ts = format!("{}\n\n**Added**: {}\n", abstract_text, timestamp, content_hash); + + self.filesystem.write(&abstract_uri, &abstract_with_ts).await?; + debug!("Updated month-level L0 for {}", entry.uri); + } else { + debug!("Skipped month-level L0 for {} (content unchanged)", entry.uri); + } } // Process day directories within @@ -729,17 +765,22 @@ impl CascadeLayerUpdater { let day_content = self.aggregate_directory_content(&entry.uri).await?; if !day_content.is_empty() { - let abstract_text = self.l0_generator - .generate_with_llm(&day_content, &self.llm_client, &[]) - .await?; - - let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); - let abstract_with_ts = format!("{}\n\n**Added**: {}", abstract_text, timestamp); - let abstract_uri = format!("{}/.abstract.md", entry.uri); - self.filesystem.write(&abstract_uri, &abstract_with_ts).await?; - - debug!("Updated day-level L0 for {}", entry.uri); + let content_hash = self.calculate_content_hash(&day_content); + // Skip if content hasn't changed + if self.should_update_layer(&abstract_uri, &content_hash).await? 
{ + let abstract_text = self.l0_generator + .generate_with_llm(&day_content, &self.llm_client, &[]) + .await?; + + let timestamp = chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC"); + let abstract_with_ts = format!("{}\n\n**Added**: {}\n", abstract_text, timestamp, content_hash); + + self.filesystem.write(&abstract_uri, &abstract_with_ts).await?; + debug!("Updated day-level L0 for {}", entry.uri); + } else { + debug!("Skipped day-level L0 for {} (content unchanged)", entry.uri); + } } } } diff --git a/examples/cortex-mem-tars/src/app.rs b/examples/cortex-mem-tars/src/app.rs index 6a0c489..77dadb0 100644 --- a/examples/cortex-mem-tars/src/app.rs +++ b/examples/cortex-mem-tars/src/app.rs @@ -1117,7 +1117,7 @@ impl App { // 只索引当前会话新产生的文件(L0/L1/L2),不全量扫描所有历史 session // 全量扫描会对每个历史 session 重新生成 timeline L0/L1,耗时极长 log::info!("📊 开始索引当前会话文件到向量数据库..."); - let index_timeout = tokio::time::Duration::from_secs(120); + let index_timeout = tokio::time::Duration::from_secs(360); match tokio::time::timeout( index_timeout, tenant_ops.index_session_files(session_id), From e2b20edbe4b44a32fe0a974e0247eece25ef9ef2 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 21:54:31 +0800 Subject: [PATCH 19/20] Add scope-based suppression for layer cascade updates Prevent duplicate layer cascade updates when session closure triggers update_all_layers simultaneously with background event processing of MemoryCreated/MemoryUpdated events. 
--- .../src/memory_event_coordinator.rs | 96 +++++++++++++++---- 1 file changed, 76 insertions(+), 20 deletions(-) diff --git a/cortex-mem-core/src/memory_event_coordinator.rs b/cortex-mem-core/src/memory_event_coordinator.rs index 03bb9fb..1cded28 100644 --- a/cortex-mem-core/src/memory_event_coordinator.rs +++ b/cortex-mem-core/src/memory_event_coordinator.rs @@ -77,6 +77,15 @@ pub struct MemoryEventCoordinator { task_completion_tx: watch::Sender, /// 任务完成接收器(用于外部等待) task_completion_rx: watch::Receiver, + /// 抑制后台层级联更新的 scope 集合(格式:"scope/owner_id")。 + /// + /// 当 `on_session_closed` 正在同步执行 `update_all_layers` 时, + /// 将对应的 "scope/owner_id" 加入此集合,防止后台 event loop 处理 + /// MemoryCreated/MemoryUpdated 事件时重复触发级联更新。 + /// update_all_layers 完成后移除对应条目,恢复正常处理。 + /// + /// 使用 scope-granular 集合而非全局 bool,避免误压制不同 scope 的用户。 + suppress_layer_cascade_scopes: Arc>>, } impl MemoryEventCoordinator { @@ -180,6 +189,7 @@ impl MemoryEventCoordinator { pending_tasks, task_completion_tx, task_completion_rx, + suppress_layer_cascade_scopes: Arc::new(tokio::sync::RwLock::new(std::collections::HashSet::new())), }); (coordinator, event_tx, event_rx) @@ -547,17 +557,30 @@ impl MemoryEventCoordinator { memory_id, memory_type, scope, owner_id ); - // Trigger layer cascade update - self.layer_updater - .on_memory_changed( - scope.clone(), - owner_id.to_string(), - file_uri.to_string(), - ChangeType::Add, - ) - .await?; + // Skip layer cascade if suppressed for this scope (e.g. during on_session_closed's + // update_all_layers), but always continue with vector sync so the new file is indexed. 
+ let key = format!("{}/{}", scope, owner_id); + let suppress_cascade = self + .suppress_layer_cascade_scopes + .read() + .await + .contains(&key); - // Trigger vector sync + if !suppress_cascade { + // Trigger layer cascade update + self.layer_updater + .on_memory_changed( + scope.clone(), + owner_id.to_string(), + file_uri.to_string(), + ChangeType::Add, + ) + .await?; + } else { + debug!("Layer cascade suppressed for MemoryCreated: {}", key); + } + + // Always trigger vector sync (suppression only skips layer cascade, not vector indexing) self.vector_sync .sync_file_change(file_uri, ChangeType::Add) .await?; @@ -583,17 +606,29 @@ impl MemoryEventCoordinator { memory_id, memory_type, scope, owner_id ); - // Trigger layer cascade update - self.layer_updater - .on_memory_changed( - scope.clone(), - owner_id.to_string(), - file_uri.to_string(), - ChangeType::Update, - ) - .await?; + // Skip layer cascade if suppressed for this scope, but always continue vector sync. + let key = format!("{}/{}", scope, owner_id); + let suppress_cascade = self + .suppress_layer_cascade_scopes + .read() + .await + .contains(&key); + + if !suppress_cascade { + // Trigger layer cascade update + self.layer_updater + .on_memory_changed( + scope.clone(), + owner_id.to_string(), + file_uri.to_string(), + ChangeType::Update, + ) + .await?; + } else { + debug!("Layer cascade suppressed for MemoryUpdated: {}", key); + } - // Trigger vector sync + // Always trigger vector sync self.vector_sync .sync_file_change(file_uri, ChangeType::Update) .await?; @@ -709,6 +744,20 @@ impl MemoryEventCoordinator { // 2. Update user memories if !extracted.is_empty() { + // 2a. 
抑制后台 event loop 对 user/agent scope 的级联更新: + // update_memories 写文件时会发出 MemoryCreated/MemoryUpdated 事件, + // 后台 loop 处理这些事件会触发 on_memory_changed → update_directory_layers + // → update_root_layers,与下方步骤 2b 的 update_all_layers 完全重复。 + // 只抑制当前 user/agent scope,不影响并发中其他 scope 的正常处理。 + // 注意:向量同步不受抑制影响,仍会正常执行。 + let user_key = format!("{}/{}", crate::memory_index::MemoryScope::User, user_id); + let agent_key = format!("{}/{}", crate::memory_index::MemoryScope::Agent, agent_id); + { + let mut set = self.suppress_layer_cascade_scopes.write().await; + set.insert(user_key.clone()); + set.insert(agent_key.clone()); + } + let user_result = self .memory_updater .update_memories(user_id, agent_id, session_id, &extracted) @@ -746,6 +795,13 @@ impl MemoryEventCoordinator { warn!("Failed to update agent L0/L1 layers: {}", e); } } + + // 2c. 恢复后台级联更新(移除 scope 抑制) + { + let mut set = self.suppress_layer_cascade_scopes.write().await; + set.remove(&user_key); + set.remove(&agent_key); + } } else { info!("No memories extracted from session {}", session_id); } From 8025f92909b831b89253f416cacda9ea4731aaa1 Mon Sep 17 00:00:00 2001 From: Sopaco Date: Wed, 11 Mar 2026 22:05:54 +0800 Subject: [PATCH 20/20] Update OpenClaw plugin API compatibility - Change tool definitions from inputSchema/handler to parameters/execute - Update PluginAPI interface to match OpenClaw's pluginConfig - Make config properties optional with default values - Remove unused entry point and tools array from plugin manifest --- .../cortex-mem-openclaw/openclaw.plugin.json | 9 +- examples/cortex-mem-openclaw/src/index.ts | 99 +++++++++++-------- 2 files changed, 59 insertions(+), 49 deletions(-) diff --git a/examples/cortex-mem-openclaw/openclaw.plugin.json b/examples/cortex-mem-openclaw/openclaw.plugin.json index a37f958..2cf7206 100644 --- a/examples/cortex-mem-openclaw/openclaw.plugin.json +++ b/examples/cortex-mem-openclaw/openclaw.plugin.json @@ -4,7 +4,6 @@ "version": "0.1.0", "description": "Layered semantic 
memory with L0/L1/L2 tiered retrieval for OpenClaw agents", "kind": "memory", - "entry": "dist/index.js", "configSchema": { "type": "object", "properties": { @@ -52,11 +51,5 @@ "label": "Minimum Score", "description": "Filter results below this relevance threshold" } - }, - "tools": [ - "cortex_search", - "cortex_recall", - "cortex_add_memory", - "cortex_list_sessions" - ] + } } diff --git a/examples/cortex-mem-openclaw/src/index.ts b/examples/cortex-mem-openclaw/src/index.ts index 90aa498..5f9836c 100644 --- a/examples/cortex-mem-openclaw/src/index.ts +++ b/examples/cortex-mem-openclaw/src/index.ts @@ -29,58 +29,75 @@ import { toolSchemas, type CortexSearchInput, type CortexRecallInput, type Corte // Plugin configuration interface PluginConfig { - serviceUrl: string; - defaultSessionId: string; - searchLimit: number; - minScore: number; + serviceUrl?: string; + defaultSessionId?: string; + searchLimit?: number; + minScore?: number; } -// OpenClaw Plugin API types (minimal definitions) -interface PluginAPI { - getConfig(): PluginConfig; - registerTool(tool: ToolDefinition): void; - logger: { - info: (msg: string, ...args: unknown[]) => void; - warn: (msg: string, ...args: unknown[]) => void; - error: (msg: string, ...args: unknown[]) => void; - }; +// OpenClaw Plugin API types — aligned with OpenClawPluginApi in openclaw/src/plugins/types.ts +interface PluginLogger { + debug?: (msg: string, ...args: unknown[]) => void; + info: (msg: string, ...args: unknown[]) => void; + warn: (msg: string, ...args: unknown[]) => void; + error: (msg: string, ...args: unknown[]) => void; } interface ToolDefinition { name: string; description: string; - inputSchema: object; - handler: (args: Record) => Promise; + /** + * JSON Schema for tool inputs. + * OpenClaw uses 'parameters', NOT 'inputSchema'. + */ + parameters: object; + /** + * Tool execution function. + * OpenClaw uses 'execute(_id, params)', NOT 'handler(args)'. 
+ */ + execute: (_id: string, params: Record) => Promise; + /** Optional: mark tool as opt-in (not auto-enabled) */ + optional?: boolean; } -interface ToolResult { - content?: string; - error?: string; - [key: string]: unknown; +// Matches OpenClawPluginApi interface +interface PluginAPI { + /** + * Plugin-specific configuration from openclaw.json + * Access via api.pluginConfig, NOT api.getConfig() + */ + pluginConfig?: Record; + registerTool(tool: ToolDefinition, opts?: { optional?: boolean }): void; + logger: PluginLogger; } -// Export plugin +// Export plugin as a default function — matches OpenClaw's resolvePluginModuleExport behavior export default function cortexMemPlugin(api: PluginAPI) { - const config = api.getConfig(); - const client = new CortexMemClient(config.serviceUrl); + const config = (api.pluginConfig ?? {}) as PluginConfig; + const serviceUrl = config.serviceUrl ?? 'http://127.0.0.1:8085'; + const defaultSessionId = config.defaultSessionId ?? 'default'; + const searchLimit = config.searchLimit ?? 10; + const minScore = config.minScore ?? 0.6; + + const client = new CortexMemClient(serviceUrl); api.logger.info('Cortex Memory plugin initializing...'); - api.logger.info(`Service URL: ${config.serviceUrl}`); + api.logger.info(`Service URL: ${serviceUrl}`); // Register cortex_search tool api.registerTool({ name: toolSchemas.cortex_search.name, description: toolSchemas.cortex_search.description, - inputSchema: toolSchemas.cortex_search.inputSchema, - handler: async (args: Record) => { - const input = args as CortexSearchInput; + parameters: toolSchemas.cortex_search.inputSchema, + execute: async (_id: string, params: Record) => { + const input = params as CortexSearchInput; try { const results = await client.search({ query: input.query, thread: input.scope, - limit: input.limit ?? config.searchLimit, - min_score: input.min_score ?? config.minScore, + limit: input.limit ?? searchLimit, + min_score: input.min_score ?? 
minScore, }); const formattedResults = results @@ -108,9 +125,9 @@ export default function cortexMemPlugin(api: PluginAPI) { api.registerTool({ name: toolSchemas.cortex_recall.name, description: toolSchemas.cortex_recall.description, - inputSchema: toolSchemas.cortex_recall.inputSchema, - handler: async (args: Record) => { - const input = args as CortexRecallInput; + parameters: toolSchemas.cortex_recall.inputSchema, + execute: async (_id: string, params: Record) => { + const input = params as CortexRecallInput; try { const results = await client.recall( @@ -163,21 +180,21 @@ export default function cortexMemPlugin(api: PluginAPI) { api.registerTool({ name: toolSchemas.cortex_add_memory.name, description: toolSchemas.cortex_add_memory.description, - inputSchema: toolSchemas.cortex_add_memory.inputSchema, - handler: async (args: Record) => { - const input = args as CortexAddMemoryInput; + parameters: toolSchemas.cortex_add_memory.inputSchema, + execute: async (_id: string, params: Record) => { + const input = params as CortexAddMemoryInput; try { - const sessionId = input.session_id ?? config.defaultSessionId; - const messageUri = await client.addMessage(sessionId, { + const sessionId = input.session_id ?? defaultSessionId; + const result = await client.addMessage(sessionId, { role: input.role ?? 'user', content: input.content, }); return { - content: `Memory stored successfully in session "${sessionId}".\nURI: ${messageUri}`, + content: `Memory stored successfully in session "${sessionId}".\nResult: ${result}`, success: true, - message_uri: messageUri, + message_uri: result, }; } catch (error) { const message = error instanceof Error ? 
error.message : String(error); @@ -191,8 +208,8 @@ export default function cortexMemPlugin(api: PluginAPI) { api.registerTool({ name: toolSchemas.cortex_list_sessions.name, description: toolSchemas.cortex_list_sessions.description, - inputSchema: toolSchemas.cortex_list_sessions.inputSchema, - handler: async () => { + parameters: toolSchemas.cortex_list_sessions.inputSchema, + execute: async (_id: string, _params: Record) => { try { const sessions = await client.listSessions(); @@ -233,7 +250,7 @@ export default function cortexMemPlugin(api: PluginAPI) { }; } -// Also support object export style +// Also support object export style (register method calls the default function above) export const plugin = { id: 'cortex-mem', name: 'Cortex Memory',